author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit     dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree       c754d0390db060af0213ff994f0ac310e4cfd6e9 /arch/ia64/kernel
parent     Adding debian version 6.6.15-2. (diff)
download   linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz
           linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/.gitignore | 3
-rw-r--r--  arch/ia64/kernel/Makefile | 46
-rw-r--r--  arch/ia64/kernel/Makefile.gate | 29
-rw-r--r--  arch/ia64/kernel/acpi-ext.c | 101
-rw-r--r--  arch/ia64/kernel/acpi.c | 913
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 289
-rw-r--r--  arch/ia64/kernel/audit.c | 63
-rw-r--r--  arch/ia64/kernel/brl_emu.c | 217
-rw-r--r--  arch/ia64/kernel/crash.c | 257
-rw-r--r--  arch/ia64/kernel/crash_dump.c | 27
-rw-r--r--  arch/ia64/kernel/cyclone.c | 125
-rw-r--r--  arch/ia64/kernel/dma-mapping.c | 9
-rw-r--r--  arch/ia64/kernel/efi.c | 1360
-rw-r--r--  arch/ia64/kernel/efi_stub.S | 87
-rw-r--r--  arch/ia64/kernel/elfcore.c | 77
-rw-r--r--  arch/ia64/kernel/entry.S | 1427
-rw-r--r--  arch/ia64/kernel/entry.h | 83
-rw-r--r--  arch/ia64/kernel/err_inject.c | 273
-rw-r--r--  arch/ia64/kernel/esi.c | 193
-rw-r--r--  arch/ia64/kernel/esi_stub.S | 99
-rw-r--r--  arch/ia64/kernel/fsys.S | 837
-rw-r--r--  arch/ia64/kernel/fsyscall_gtod_data.h | 30
-rw-r--r--  arch/ia64/kernel/ftrace.c | 196
-rw-r--r--  arch/ia64/kernel/gate-data.S | 3
-rw-r--r--  arch/ia64/kernel/gate.S | 380
-rw-r--r--  arch/ia64/kernel/gate.lds.S | 108
-rw-r--r--  arch/ia64/kernel/head.S | 1167
-rw-r--r--  arch/ia64/kernel/iosapic.c | 1137
-rw-r--r--  arch/ia64/kernel/irq.c | 181
-rw-r--r--  arch/ia64/kernel/irq.h | 3
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 645
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c | 45
-rw-r--r--  arch/ia64/kernel/ivt.S | 1688
-rw-r--r--  arch/ia64/kernel/kprobes.c | 911
-rw-r--r--  arch/ia64/kernel/machine_kexec.c | 163
-rw-r--r--  arch/ia64/kernel/mca.c | 2111
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 1123
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 796
-rw-r--r--  arch/ia64/kernel/mca_drv.h | 123
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S | 56
-rw-r--r--  arch/ia64/kernel/minstate.h | 251
-rw-r--r--  arch/ia64/kernel/module.c | 959
-rw-r--r--  arch/ia64/kernel/msi_ia64.c | 198
-rw-r--r--  arch/ia64/kernel/numa.c | 73
-rw-r--r--  arch/ia64/kernel/pal.S | 306
-rw-r--r--  arch/ia64/kernel/palinfo.c | 942
-rw-r--r--  arch/ia64/kernel/patch.c | 237
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 33
-rw-r--r--  arch/ia64/kernel/perfmon_itanium.h | 116
-rw-r--r--  arch/ia64/kernel/process.c | 611
-rw-r--r--  arch/ia64/kernel/ptrace.c | 2012
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 321
-rw-r--r--  arch/ia64/kernel/sal.c | 400
-rw-r--r--  arch/ia64/kernel/salinfo.c | 646
-rw-r--r--  arch/ia64/kernel/setup.c | 1081
-rw-r--r--  arch/ia64/kernel/sigframe.h | 26
-rw-r--r--  arch/ia64/kernel/signal.c | 412
-rw-r--r--  arch/ia64/kernel/smp.c | 335
-rw-r--r--  arch/ia64/kernel/smpboot.c | 839
-rw-r--r--  arch/ia64/kernel/stacktrace.c | 40
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 197
-rw-r--r--  arch/ia64/kernel/syscalls/Makefile | 32
-rw-r--r--  arch/ia64/kernel/syscalls/syscall.tbl | 375
-rw-r--r--  arch/ia64/kernel/time.c | 463
-rw-r--r--  arch/ia64/kernel/topology.c | 410
-rw-r--r--  arch/ia64/kernel/traps.c | 612
-rw-r--r--  arch/ia64/kernel/unaligned.c | 1560
-rw-r--r--  arch/ia64/kernel/uncached.c | 273
-rw-r--r--  arch/ia64/kernel/unwind.c | 2320
-rw-r--r--  arch/ia64/kernel/unwind_decoder.c | 460
-rw-r--r--  arch/ia64/kernel/unwind_i.h | 165
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 224
72 files changed, 0 insertions, 34310 deletions
diff --git a/arch/ia64/kernel/.gitignore b/arch/ia64/kernel/.gitignore
deleted file mode 100644
index 0374827206..0000000000
--- a/arch/ia64/kernel/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-gate.lds
-vmlinux.lds
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
deleted file mode 100644
index d7e1cabee2..0000000000
--- a/arch/ia64/kernel/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux kernel.
-#
-
-ifdef CONFIG_DYNAMIC_FTRACE
-CFLAGS_REMOVE_ftrace.o = -pg
-endif
-
-extra-y := vmlinux.lds
-
-obj-y := head.o entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \
- irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \
- salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
- unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \
- acpi-ext.o
-
-obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
-
-obj-$(CONFIG_IA64_PALINFO) += palinfo.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_SMP) += smp.o smpboot.o
-obj-$(CONFIG_NUMA) += numa.o
-obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
-obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
-obj-$(CONFIG_AUDIT) += audit.o
-obj-y += msi_ia64.o
-mca_recovery-y += mca_drv.o mca_drv_asm.o
-obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-
-obj-$(CONFIG_IA64_ESI) += esi.o esi_stub.o # must be in kernel proper
-obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
-
-obj-$(CONFIG_ELF_CORE) += elfcore.o
-
-# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
-CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
-
-# The gate DSO image is built using a special linker script.
-include $(srctree)/$(src)/Makefile.gate
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
deleted file mode 100644
index 846867bff6..0000000000
--- a/arch/ia64/kernel/Makefile.gate
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# The gate DSO image is built using a special linker script.
-
-targets += gate.so gate.lds gate.o gate-dummy.o
-
-obj-y += gate-syms.o
-
-CPPFLAGS_gate.lds := -P -C -U$(ARCH)
-
-quiet_cmd_gate = GATE $@
- cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
-
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- -Wl,--hash-style=sysv
-$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
- $(call if_changed,gate)
-
-GATECFLAGS_gate-dummy.o = -r
-$(obj)/gate-dummy.o: $(obj)/gate.lds $(obj)/gate.o FORCE
- $(call if_changed,gate)
-
-LDFLAGS_gate-syms.o := -r -R
-$(obj)/gate-syms.o: $(obj)/gate-dummy.o FORCE
- $(call if_changed,ld)
-
-# gate-data.o contains the gate DSO image as data in section .data..gate.
-# We must build gate.so before we can assemble it.
-# Note: kbuild does not track this dependency due to usage of .incbin
-$(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
deleted file mode 100644
index 42cd214808..0000000000
--- a/arch/ia64/kernel/acpi-ext.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
- * Alex Williamson <alex.williamson@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-
-#include <asm/acpi-ext.h>
-
-/*
- * Device CSRs that do not appear in PCI config space should be described
- * via ACPI. This would normally be done with Address Space Descriptors
- * marked as "consumer-only," but old versions of Windows and Linux ignore
- * the producer/consumer flag, so HP invented a vendor-defined resource to
- * describe the location and size of CSR space.
- */
-
-struct acpi_vendor_uuid hp_ccsr_uuid = {
- .subtype = 2,
- .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
- 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
-};
-
-static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
-{
- acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_resource *resource;
- struct acpi_resource_vendor_typed *vendor;
-
- status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
- &buffer);
-
- resource = buffer.pointer;
- vendor = &resource->data.vendor_typed;
-
- if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
- status = AE_NOT_FOUND;
- goto exit;
- }
-
- memcpy(base, vendor->byte_data, sizeof(*base));
- memcpy(length, vendor->byte_data + 8, sizeof(*length));
-
- exit:
- kfree(buffer.pointer);
- return status;
-}
-
-struct csr_space {
- u64 base;
- u64 length;
-};
-
-static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
-{
- struct csr_space *space = data;
- struct acpi_resource_address64 addr;
- acpi_status status;
-
- status = acpi_resource_to_address64(resource, &addr);
- if (ACPI_SUCCESS(status) &&
- addr.resource_type == ACPI_MEMORY_RANGE &&
- addr.address.address_length &&
- addr.producer_consumer == ACPI_CONSUMER) {
- space->base = addr.address.minimum;
- space->length = addr.address.address_length;
- return AE_CTRL_TERMINATE;
- }
- return AE_OK; /* keep looking */
-}
-
-static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
-{
- struct csr_space space = { 0, 0 };
-
- acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
- if (!space.length)
- return AE_NOT_FOUND;
-
- *base = space.base;
- *length = space.length;
- return AE_OK;
-}
-
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
-{
- acpi_status status;
-
- status = hp_ccsr_locate(obj, csr_base, csr_length);
- if (ACPI_SUCCESS(status))
- return status;
-
- return hp_crs_locate(obj, csr_base, csr_length);
-}
-EXPORT_SYMBOL(hp_acpi_csr_space);
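
For illustration, a minimal sketch of a hypothetical caller of hp_acpi_csr_space(); this caller is not part of the file above, and the ACPI handle plus the ioremap() step are assumptions about typical driver usage:

	/* Hypothetical: map a device's CSR window located via ACPI. */
	static void __iomem *example_map_csrs(acpi_handle handle)
	{
		u64 base, length;

		if (ACPI_FAILURE(hp_acpi_csr_space(handle, &base, &length)))
			return NULL;	/* neither CCSR descriptor nor _CRS match */

		return ioremap(base, length);
	}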
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
deleted file mode 100644
index 41e8fe55cd..0000000000
--- a/arch/ia64/kernel/acpi.c
+++ /dev/null
@@ -1,913 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * acpi.c - Architecture-Specific Low-Level ACPI Support
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2000 Intel Corp.
- * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
- * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
- * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
- * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
- * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <linux/acpi.h>
-#include <linux/efi.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-#include <linux/slab.h>
-#include <acpi/processor.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/page.h>
-#include <asm/numa.h>
-#include <asm/sal.h>
-#include <asm/cyclone.h>
-
-#define PREFIX "ACPI: "
-
-int acpi_lapic;
-unsigned int acpi_cpei_override;
-unsigned int acpi_cpei_phys_cpuid;
-
-#define ACPI_MAX_PLATFORM_INTERRUPTS 256
-
-/* Array to record platform interrupt vectors for generic interrupt routing. */
-int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
- [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
-};
-
-enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
-
-/*
- * Interrupt routing API for device drivers. Provides interrupt vector for
- * a generic platform event. Currently only CPEI is implemented.
- */
-int acpi_request_vector(u32 int_type)
-{
- int vector = -1;
-
- if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
- /* corrected platform error interrupt */
- vector = platform_intr_list[int_type];
- } else
- printk(KERN_ERR
- "acpi_request_vector(): invalid interrupt type\n");
- return vector;
-}
-
-void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
-{
- return __va(phys);
-}
-
-void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
-{
-}
-
-/* --------------------------------------------------------------------------
- Boot-time Table Parsing
- -------------------------------------------------------------------------- */
-
-static int available_cpus __initdata;
-struct acpi_table_madt *acpi_madt __initdata;
-static u8 has_8259;
-
-static int __init
-acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
- const unsigned long end)
-{
- struct acpi_madt_local_apic_override *lapic;
-
- lapic = (struct acpi_madt_local_apic_override *)header;
-
- if (BAD_MADT_ENTRY(lapic, end))
- return -EINVAL;
-
- if (lapic->address) {
- iounmap(ipi_base_addr);
- ipi_base_addr = ioremap(lapic->address, 0);
- }
- return 0;
-}
-
-static int __init
-acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end)
-{
- struct acpi_madt_local_sapic *lsapic;
-
- lsapic = (struct acpi_madt_local_sapic *)header;
-
- /* Skip BAD_MADT_ENTRY check, as lsapic size could vary */
-
- if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
-#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[available_cpus] =
- (lsapic->id << 8) | lsapic->eid;
-#endif
- ++available_cpus;
- }
-
- total_cpus++;
- return 0;
-}
-
-static int __init
-acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
-{
- struct acpi_madt_local_apic_nmi *lacpi_nmi;
-
- lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
-
- if (BAD_MADT_ENTRY(lacpi_nmi, end))
- return -EINVAL;
-
- /* TBD: Support lapic_nmi entries */
- return 0;
-}
-
-static int __init
-acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end)
-{
- struct acpi_madt_io_sapic *iosapic;
-
- iosapic = (struct acpi_madt_io_sapic *)header;
-
- if (BAD_MADT_ENTRY(iosapic, end))
- return -EINVAL;
-
- return iosapic_init(iosapic->address, iosapic->global_irq_base);
-}
-
-static unsigned int __initdata acpi_madt_rev;
-
-static int __init
-acpi_parse_plat_int_src(union acpi_subtable_headers * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_source *plintsrc;
- int vector;
-
- plintsrc = (struct acpi_madt_interrupt_source *)header;
-
- if (BAD_MADT_ENTRY(plintsrc, end))
- return -EINVAL;
-
- /*
- * Get vector assignment for this interrupt, set attributes,
- * and program the IOSAPIC routing table.
- */
- vector = iosapic_register_platform_intr(plintsrc->type,
- plintsrc->global_irq,
- plintsrc->io_sapic_vector,
- plintsrc->eid,
- plintsrc->id,
- ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
- IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_EDGE) ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL);
-
- platform_intr_list[plintsrc->type] = vector;
- if (acpi_madt_rev > 1) {
- acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
- }
-
- /*
- * Save the physical id, so we can check when it's being removed
- */
- acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
-
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-unsigned int can_cpei_retarget(void)
-{
- extern int cpe_vector;
- extern unsigned int force_cpei_retarget;
-
- /*
- * CPEI is re-targettable only if the override flag is present
- * (or retargeting is forced); without CPEI support we are in
- * polling mode, which is always re-targettable.
- */
- if (cpe_vector > 0) {
- if (acpi_cpei_override || force_cpei_retarget)
- return 1;
- else
- return 0;
- }
- return 1;
-}
-
-unsigned int is_cpu_cpei_target(unsigned int cpu)
-{
- unsigned int logical_id;
-
- logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
-
- if (logical_id == cpu)
- return 1;
- else
- return 0;
-}
-
-void set_cpei_target_cpu(unsigned int cpu)
-{
- acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
-}
-#endif
-
-unsigned int get_cpei_target_cpu(void)
-{
- return acpi_cpei_phys_cpuid;
-}
-
-static int __init
-acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_override *p;
-
- p = (struct acpi_madt_interrupt_override *)header;
-
- if (BAD_MADT_ENTRY(p, end))
- return -EINVAL;
-
- iosapic_override_isa_irq(p->source_irq, p->global_irq,
- ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_LOW) ?
- IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH,
- ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_LEVEL) ?
- IOSAPIC_LEVEL : IOSAPIC_EDGE);
- return 0;
-}
-
-static int __init
-acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
-{
- struct acpi_madt_nmi_source *nmi_src;
-
- nmi_src = (struct acpi_madt_nmi_source *)header;
-
- if (BAD_MADT_ENTRY(nmi_src, end))
- return -EINVAL;
-
- /* TBD: Support nmi_src entries */
- return 0;
-}
-
-static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
-
- /*
- * Unfortunately ITC_DRIFT is not yet part of the
- * official SAL spec, so the ITC_DRIFT bit is not
- * set by the BIOS on this hardware.
- */
- sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
-
- cyclone_setup();
- }
-}
-
-static int __init acpi_parse_madt(struct acpi_table_header *table)
-{
- acpi_madt = (struct acpi_table_madt *)table;
-
- acpi_madt_rev = acpi_madt->header.revision;
-
- /* remember the value for reference after free_initmem() */
-#ifdef CONFIG_ITANIUM
- has_8259 = 1; /* Firmware on old Itanium systems is broken */
-#else
- has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
-#endif
- iosapic_system_init(has_8259);
-
- /* Get base address of IPI Message Block */
-
- if (acpi_madt->address)
- ipi_base_addr = ioremap(acpi_madt->address, 0);
-
- printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
-
- acpi_madt_oem_check(acpi_madt->header.oem_id,
- acpi_madt->header.oem_table_id);
-
- return 0;
-}
-
-#ifdef CONFIG_ACPI_NUMA
-
-#undef SLIT_DEBUG
-
-#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
-
-static int __initdata srat_num_cpus; /* number of cpus */
-static u32 pxm_flag[PXM_FLAG_LEN];
-#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
-#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-static struct acpi_table_slit __initdata *slit_table;
-cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-
-static int __init
-get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- pxm = pa->proximity_domain_lo;
- if (acpi_srat_revision >= 2)
- pxm += pa->proximity_domain_hi[0] << 8;
- return pxm;
-}
-
-static int __init
-get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
-{
- int pxm;
-
- pxm = ma->proximity_domain;
- if (acpi_srat_revision <= 1)
- pxm &= 0xff;
-
- return pxm;
-}
-
-/*
- * ACPI 2.0 SLIT (System Locality Information Table)
- * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
- */
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
- u32 len;
-
- len = sizeof(struct acpi_table_header) + 8
- + slit->locality_count * slit->locality_count;
- if (slit->header.length != len) {
- printk(KERN_ERR
- "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
- len, slit->header.length);
- return;
- }
- slit_table = slit;
-}
-
-void __init
-acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
- return;
-
- if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
- printk_once(KERN_WARNING
- "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
- ARRAY_SIZE(node_cpuid));
- return;
- }
- pxm = get_processor_proximity_domain(pa);
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- node_cpuid[srat_num_cpus].phys_id =
- (pa->apic_id << 8) | (pa->local_sapic_eid);
- /* nid should be overridden as logical node id later */
- node_cpuid[srat_num_cpus].nid = pxm;
- cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
- srat_num_cpus++;
-}
-
-int __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
-{
- unsigned long paddr, size;
- int pxm;
- struct node_memblk_s *p, *q, *pend;
-
- pxm = get_memory_proximity_domain(ma);
-
- /* fill node memory chunk structure */
- paddr = ma->base_address;
- size = ma->length;
-
- /* Ignore disabled entries */
- if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return -1;
-
- if (num_node_memblks >= NR_NODE_MEMBLKS) {
- pr_err("NUMA: too many memblk ranges\n");
- return -EINVAL;
- }
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- /* Insertion sort based on base address */
- pend = &node_memblk[num_node_memblks];
- for (p = &node_memblk[0]; p < pend; p++) {
- if (paddr < p->start_paddr)
- break;
- }
- if (p < pend) {
- for (q = pend - 1; q >= p; q--)
- *(q + 1) = *q;
- }
- p->start_paddr = paddr;
- p->size = size;
- p->nid = pxm;
- num_node_memblks++;
- return 0;
-}
-
-void __init acpi_numa_fixup(void)
-{
- int i, j, node_from, node_to;
-
- /* If there's no SRAT, fix the phys_id and mark node 0 online */
- if (srat_num_cpus == 0) {
- node_set_online(0);
- node_cpuid[0].phys_id = hard_smp_processor_id();
- slit_distance(0, 0) = LOCAL_DISTANCE;
- goto out;
- }
-
- /*
- * MCD - This can probably be dropped now. No need for pxm ID to node ID
- * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
- */
- nodes_clear(node_online_map);
- for (i = 0; i < MAX_PXM_DOMAINS; i++) {
- if (pxm_bit_test(i)) {
- int nid = acpi_map_pxm_to_node(i);
- node_set_online(nid);
- }
- }
-
- /* set logical node id in memory chunk structure */
- for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
-
- /* assign memory bank numbers for each chunk on each node */
- for_each_online_node(i) {
- int bank;
-
- bank = 0;
- for (j = 0; j < num_node_memblks; j++)
- if (node_memblk[j].nid == i)
- node_memblk[j].bank = bank++;
- }
-
- /* set logical node id in cpu structure */
- for_each_possible_early_cpu(i)
- node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
-
- printk(KERN_INFO "Number of logical nodes in system = %d\n",
- num_online_nodes());
- printk(KERN_INFO "Number of memory chunks in system = %d\n",
- num_node_memblks);
-
- if (!slit_table) {
- for (i = 0; i < MAX_NUMNODES; i++)
- for (j = 0; j < MAX_NUMNODES; j++)
- slit_distance(i, j) = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
- goto out;
- }
-
- memset(numa_slit, -1, sizeof(numa_slit));
- for (i = 0; i < slit_table->locality_count; i++) {
- if (!pxm_bit_test(i))
- continue;
- node_from = pxm_to_node(i);
- for (j = 0; j < slit_table->locality_count; j++) {
- if (!pxm_bit_test(j))
- continue;
- node_to = pxm_to_node(j);
- slit_distance(node_from, node_to) =
- slit_table->entry[i * slit_table->locality_count + j];
- }
- }
-
-#ifdef SLIT_DEBUG
- printk("ACPI 2.0 SLIT locality table:\n");
- for_each_online_node(i) {
- for_each_online_node(j)
- printk("%03d ", node_distance(i, j));
- printk("\n");
- }
-#endif
-out:
- node_possible_map = node_online_map;
-}
-#endif /* CONFIG_ACPI_NUMA */
-
-/*
- * success: return IRQ number (>=0)
- * failure: return < 0
- */
-int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return gsi;
-
- if (has_8259 && gsi < 16)
- return isa_irq_to_vector(gsi);
-
- return iosapic_register_intr(gsi,
- (polarity ==
- ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
- IOSAPIC_POL_LOW,
- (triggering ==
- ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
- IOSAPIC_LEVEL);
-}
-EXPORT_SYMBOL_GPL(acpi_register_gsi);
-
-void acpi_unregister_gsi(u32 gsi)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return;
-
- if (has_8259 && gsi < 16)
- return;
-
- iosapic_unregister_intr(gsi);
-}
-EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
-
-static int __init acpi_parse_fadt(struct acpi_table_header *table)
-{
- struct acpi_table_header *fadt_header;
- struct acpi_table_fadt *fadt;
-
- fadt_header = (struct acpi_table_header *)table;
- if (fadt_header->revision != 3)
- return -ENODEV; /* Only deal with ACPI 2.0 FADT */
-
- fadt = (struct acpi_table_fadt *)fadt_header;
-
- acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
- ACPI_ACTIVE_LOW);
- return 0;
-}
-
-int __init early_acpi_boot_init(void)
-{
- int ret;
-
- /*
- * do a partial walk of MADT to determine how many CPUs
- * we have including offline CPUs
- */
- if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
- printk(KERN_ERR PREFIX "Can't find MADT\n");
- return 0;
- }
-
- ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
- acpi_parse_lsapic, NR_CPUS);
- if (ret < 1)
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no LAPIC entries\n");
- else
- acpi_lapic = 1;
-
-#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
-
- return 0;
-}
-
-int __init acpi_boot_init(void)
-{
-
- /*
- * MADT
- * ----
- * Parse the Multiple APIC Description Table (MADT), if it exists.
- * Note that this table provides platform SMP configuration
- * information -- the successor to MPS tables.
- */
-
- if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
- printk(KERN_ERR PREFIX "Can't find MADT\n");
- goto skip_madt;
- }
-
- /* Local APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
- < 0)
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-
- /* I/O APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no IOSAPIC entries\n");
- }
-
- /* System-Level Interrupt Routing */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
- ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing platform interrupt source entry\n");
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
- skip_madt:
-
- /*
- * FADT says whether a legacy keyboard controller is present.
- * The FADT also contains an SCI_INT line, by which the system
- * gets interrupts such as power and sleep buttons. If it's not
- * on a legacy interrupt, it needs to be set up.
- */
- if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
- printk(KERN_ERR PREFIX "Can't find FADT\n");
-
-#ifdef CONFIG_ACPI_NUMA
-#ifdef CONFIG_SMP
- if (srat_num_cpus == 0) {
- int cpu, i = 1;
- for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
- if (smp_boot_data.cpu_phys_id[cpu] !=
- hard_smp_processor_id())
- node_cpuid[i++].phys_id =
- smp_boot_data.cpu_phys_id[cpu];
- }
-#endif
- build_cpu_to_node_map();
-#endif
- return 0;
-}
-
-int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-{
- int tmp;
-
- if (has_8259 && gsi < 16)
- *irq = isa_irq_to_vector(gsi);
- else {
- tmp = gsi_to_irq(gsi);
- if (tmp == -1)
- return -1;
- *irq = tmp;
- }
- return 0;
-}
-
-int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
-{
- if (isa_irq >= 16)
- return -1;
- *gsi = isa_irq;
- return 0;
-}
-
-/*
- * ACPI based hotplug CPU support
- */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-#ifdef CONFIG_ACPI_NUMA
- /*
- * We don't support cpu-only-node hot-add. But if the system
- * provides an SRAT table, the pxm has already been found and the
- * node is ready, so pxm_to_nid(pxm) alone is enough.
- * This code is for systems that lack a full SRAT table covering
- * all possible cpus.
- */
- node_cpuid[cpu].phys_id = physid;
- node_cpuid[cpu].nid = acpi_get_node(handle);
-#endif
- return 0;
-}
-
-int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
- if (s)
- additional_cpus = simple_strtol(s, NULL, 0);
-
- return 0;
-}
-
-early_param("additional_cpus", setup_additional_cpus);
-
-/*
- * cpu_possible_mask should be static: it cannot change as CPUs
- * are onlined or offlined. The reason is that per-cpu data
- * structures are allocated by some modules at init time and don't
- * expect to grow or shrink dynamically on cpu arrival/departure.
- * cpu_present_mask, on the other hand, can change dynamically.
- * When cpu hotplug is not compiled in, we fall back to the current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - The user can overwrite it with additional_cpus=NUM
- * - Otherwise don't reserve additional CPUs.
- */
-__init void prefill_possible_map(void)
-{
- int i;
- int possible, disabled_cpus;
-
- disabled_cpus = total_cpus - available_cpus;
-
- if (additional_cpus == -1) {
- if (disabled_cpus > 0)
- additional_cpus = disabled_cpus;
- else
- additional_cpus = 0;
- }
-
- possible = available_cpus + additional_cpus;
-
- if (possible > nr_cpu_ids)
- possible = nr_cpu_ids;
-
- printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
- possible, max((possible - available_cpus), 0));
-
- for (i = 0; i < possible; i++)
- set_cpu_possible(i, true);
-}
-
-static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
-{
- int cpu;
-
- cpu = cpumask_first_zero(cpu_present_mask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- acpi_map_cpu2node(handle, cpu, physid);
-
- set_cpu_present(cpu, true);
- ia64_cpu_to_sapicid[cpu] = physid;
-
- acpi_processor_set_pdc(handle);
-
- *pcpu = cpu;
- return (0);
-}
-
-/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
- int *pcpu)
-{
- return _acpi_map_lsapic(handle, physid, pcpu);
-}
-EXPORT_SYMBOL(acpi_map_cpu);
-
-int acpi_unmap_cpu(int cpu)
-{
- ia64_cpu_to_sapicid[cpu] = -1;
- set_cpu_present(cpu, false);
-
-#ifdef CONFIG_ACPI_NUMA
- /* NUMA specific cleanup's */
-#endif
-
- return (0);
-}
-EXPORT_SYMBOL(acpi_unmap_cpu);
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
-#ifdef CONFIG_ACPI_NUMA
-static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
- void *context, void **ret)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_io_sapic *iosapic;
- unsigned int gsi_base;
- int node;
-
- /* Only care about objects w/ a method that returns the MADT */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return AE_OK;
-
- if (!buffer.length || !buffer.pointer)
- return AE_OK;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*iosapic)) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
-
- if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- gsi_base = iosapic->global_irq_base;
-
- kfree(buffer.pointer);
-
- /* OK, it's an IOSAPIC MADT entry; associate it with a node */
- node = acpi_get_node(handle);
- if (node == NUMA_NO_NODE || !node_online(node) ||
- cpumask_empty(cpumask_of_node(node)))
- return AE_OK;
-
- /* We know a gsi to node mapping! */
- map_iosapic_to_node(gsi_base, node);
- return AE_OK;
-}
-
-static int __init
-acpi_map_iosapics (void)
-{
- acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
- return 0;
-}
-
-fs_initcall(acpi_map_iosapics);
-#endif /* CONFIG_ACPI_NUMA */
-
-int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-{
- int err;
-
- if ((err = iosapic_init(phys_addr, gsi_base)))
- return err;
-
-#ifdef CONFIG_ACPI_NUMA
- acpi_map_iosapic(handle, 0, NULL, NULL);
-#endif /* CONFIG_ACPI_NUMA */
-
- return 0;
-}
-
-EXPORT_SYMBOL(acpi_register_ioapic);
-
-int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
-{
- return iosapic_remove(gsi_base);
-}
-
-EXPORT_SYMBOL(acpi_unregister_ioapic);
-
-/*
- * acpi_suspend_lowlevel() - save kernel state and suspend.
- *
- * TBD when IA64 starts to support suspend...
- */
-int acpi_suspend_lowlevel(void) { return 0; }
-
-void acpi_proc_quirk_mwait_check(void)
-{
-}
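
As a usage sketch for the GSI routing API above (hypothetical driver code; the flags and handler are illustrative): acpi_register_gsi() resolves a Global System Interrupt to a Linux IRQ, via isa_irq_to_vector() for legacy 8259 lines and otherwise via the IOSAPIC, after which the IRQ can be requested normally:

	/* Hypothetical: wire up an interrupt described by a _CRS GSI. */
	static int example_wire_irq(struct device *dev, u32 gsi,
				    irq_handler_t handler, void *data)
	{
		int irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
					    ACPI_ACTIVE_LOW);
		if (irq < 0)
			return irq;

		return request_irq(irq, handler, IRQF_SHARED, "example", data);
	}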
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
deleted file mode 100644
index be3b90fef2..0000000000
--- a/arch/ia64/kernel/asm-offsets.c
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#define ASM_OFFSETS_C 1
-
-#include <linux/sched/signal.h>
-#include <linux/pid.h>
-#include <linux/clocksource.h>
-#include <linux/kbuild.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/siginfo.h>
-#include <asm/sigcontext.h>
-#include <asm/mca.h>
-
-#include "../kernel/sigframe.h"
-#include "../kernel/fsyscall_gtod_data.h"
-
-void foo(void)
-{
- DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
- DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
- DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
- DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
- DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
- DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
- DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
- DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
-
- BUILD_BUG_ON(sizeof(struct upid) != 16);
- DEFINE(IA64_UPID_SHIFT, 4);
-
- BLANK();
-
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
- DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
- DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
- DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
-#endif
-
- BLANK();
-
- DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
- DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
- DEFINE(IA64_TASK_THREAD_PID_OFFSET,offsetof (struct task_struct, thread_pid));
- DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
- DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
- DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
- DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
- DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
- DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
- DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
- DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
- DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
-
- BLANK();
-
-
- DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
- group_stop_count));
- DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));
- DEFINE(IA64_SIGNAL_PIDS_TGID_OFFSET, offsetof (struct signal_struct, pids[PIDTYPE_TGID]));
-
- BLANK();
-
- DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
- DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
- DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
- DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
- DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
- DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
- DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
- DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
- DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
- DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
- DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
- DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
- DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
- DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
- DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
-
- DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
- DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
- DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
- DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
- DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
- DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
- DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
- DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
- DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
- DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
- DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
- DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
- DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
- DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
- DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
- DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
- DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
- DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
- DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
- DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
- DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
- DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
- DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
- DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
- DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
- DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
- DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
- DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
- DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
- DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
- DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
- DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
- DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
- DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
- DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
-
- BLANK();
-
- DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
- DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
- DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
- DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
- DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
- DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
- DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
- DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
- DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
- DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
- DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
- DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
- DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
- DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
- DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
- DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
- DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
- DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
- DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
- DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
- DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
- DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
- DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
- DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
- DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
- DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
- DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
- DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
- DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
- DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
- DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
- DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
- DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
- DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
- DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
- DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
- DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
- DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
- DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
- DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
- DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
- DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
-
- BLANK();
-
- DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
- DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
- DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
- DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
- DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
- DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
- DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
- DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
- DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
- DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
- DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
- DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
- DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));
-
- BLANK();
-
- DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal));
-
- BLANK();
-
- DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
- DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
- DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
- DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
- DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
- BLANK();
- /* for assembly files which can't include sched.h: */
- DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
- DEFINE(IA64_CLONE_VM, CLONE_VM);
-
- BLANK();
- DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET,
- offsetof (struct cpuinfo_ia64, nsec_per_cyc));
- DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
- offsetof (struct cpuinfo_ia64, ptce_base));
- DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
- offsetof (struct cpuinfo_ia64, ptce_count));
- DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
- offsetof (struct cpuinfo_ia64, ptce_stride));
- BLANK();
- DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
- offsetof (struct __kernel_old_timespec, tv_nsec));
- DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
- offsetof (struct time_sn_spec, snsec));
-
- DEFINE(CLONE_SETTLS_BIT, 19);
-#if CLONE_SETTLS != (1<<19)
-# error "CLONE_SETTLS_BIT incorrect, please fix"
-#endif
-
- BLANK();
- DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
- offsetof (struct ia64_mca_cpu, mca_stack));
- DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
- offsetof (struct ia64_mca_cpu, init_stack));
- BLANK();
- DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
- offsetof (struct ia64_sal_os_state, os_gp));
- DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
- offsetof (struct ia64_sal_os_state, proc_state_param));
- DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET,
- offsetof (struct ia64_sal_os_state, sal_ra));
- DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET,
- offsetof (struct ia64_sal_os_state, sal_gp));
- DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
- offsetof (struct ia64_sal_os_state, pal_min_state));
- DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET,
- offsetof (struct ia64_sal_os_state, os_status));
- DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET,
- offsetof (struct ia64_sal_os_state, context));
- DEFINE(IA64_SAL_OS_STATE_SIZE,
- sizeof (struct ia64_sal_os_state));
- BLANK();
-
- DEFINE(IA64_PMSA_GR_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_gr));
- DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_bank1_gr));
- DEFINE(IA64_PMSA_PR_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_pr));
- DEFINE(IA64_PMSA_BR0_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_br0));
- DEFINE(IA64_PMSA_RSC_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_rsc));
- DEFINE(IA64_PMSA_IIP_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_iip));
- DEFINE(IA64_PMSA_IPSR_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_ipsr));
- DEFINE(IA64_PMSA_IFS_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_ifs));
- DEFINE(IA64_PMSA_XIP_OFFSET,
- offsetof(struct pal_min_state_area, pmsa_xip));
- BLANK();
-
- /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
- DEFINE(IA64_GTOD_SEQ_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, seq));
- DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, wall_time));
- DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, monotonic_time));
- DEFINE(IA64_CLKSRC_MASK_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, clk_mask));
- DEFINE(IA64_CLKSRC_MULT_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, clk_mult));
- DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, clk_shift));
- DEFINE(IA64_CLKSRC_MMIO_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
- DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
- DEFINE(IA64_ITC_JITTER_OFFSET,
- offsetof (struct itc_jitter_data_t, itc_jitter));
- DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
- offsetof (struct itc_jitter_data_t, itc_lastcycle));
-
-}
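
For context on the mechanism: DEFINE() and BLANK() come from <linux/kbuild.h> and emit marker strings into the compiler's assembly output rather than running any code; kbuild then extracts the "->" markers from asm-offsets.s and rewrites them as #define constants in include/generated/asm-offsets.h, which assembly sources such as entry.S include. The helpers are essentially:

	/* From <linux/kbuild.h>: emits "->SYM <value> <expr>" into the asm output. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define BLANK() asm volatile("\n.ascii \"->\"" : : )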
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
deleted file mode 100644
index ec61f20ca6..0000000000
--- a/arch/ia64/kernel/audit.c
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/audit.h>
-#include <asm/unistd.h>
-
-static unsigned dir_class[] = {
-#include <asm-generic/audit_dir_write.h>
-~0U
-};
-
-static unsigned read_class[] = {
-#include <asm-generic/audit_read.h>
-~0U
-};
-
-static unsigned write_class[] = {
-#include <asm-generic/audit_write.h>
-~0U
-};
-
-static unsigned chattr_class[] = {
-#include <asm-generic/audit_change_attr.h>
-~0U
-};
-
-static unsigned signal_class[] = {
-#include <asm-generic/audit_signal.h>
-~0U
-};
-
-int audit_classify_arch(int arch)
-{
- return 0;
-}
-
-int audit_classify_syscall(int abi, unsigned syscall)
-{
- switch(syscall) {
- case __NR_open:
- return AUDITSC_OPEN;
- case __NR_openat:
- return AUDITSC_OPENAT;
- case __NR_execve:
- return AUDITSC_EXECVE;
- case __NR_openat2:
- return AUDITSC_OPENAT2;
- default:
- return AUDITSC_NATIVE;
- }
-}
-
-static int __init audit_classes_init(void)
-{
- audit_register_class(AUDIT_CLASS_WRITE, write_class);
- audit_register_class(AUDIT_CLASS_READ, read_class);
- audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
- audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
- audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
- return 0;
-}
-
-__initcall(audit_classes_init);
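
The class arrays above are filled entirely by header inclusion: each asm-generic/audit_*.h header is a bare comma-separated list of __NR_* syscall numbers, and the trailing ~0U is the sentinel that audit_register_class() scans for. After preprocessing, a class array looks roughly like this (the exact syscall set shown is illustrative):

	static unsigned write_class[] = {
		__NR_truncate, __NR_ftruncate, /* ...rest of audit_write.h... */
		~0U	/* end-of-list sentinel */
	};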
diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c
deleted file mode 100644
index 782c481d70..0000000000
--- a/arch/ia64/kernel/brl_emu.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Emulation of the "brl" instruction for IA64 processors that
- * don't support it in hardware.
- * Author: Stephan Zeisset, Intel Corp. <Stephan.Zeisset@intel.com>
- *
- * 02/22/02 D. Mosberger Clear si_flgs, si_isr, and si_imm to avoid
- * leaking kernel bits.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h>
-
-extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5;
-
-struct illegal_op_return {
- unsigned long fkt, arg1, arg2, arg3;
-};
-
-/*
- * The unimplemented bits of a virtual address must be set
- * to the value of the most significant implemented bit.
- * unimpl_va_mask includes all unimplemented bits and
- * the most significant implemented bit, so the result
- * of an and operation with the mask must be all 0's
- * or all 1's for the address to be valid.
- */
-#define unimplemented_virtual_address(va) ( \
- ((va) & local_cpu_data->unimpl_va_mask) != 0 && \
- ((va) & local_cpu_data->unimpl_va_mask) != local_cpu_data->unimpl_va_mask \
-)
-
-/*
- * The unimplemented bits of a physical address must be 0.
- * unimpl_pa_mask includes all unimplemented bits, so the result
- * of an and operation with the mask must be all 0's for the
- * address to be valid.
- */
-#define unimplemented_physical_address(pa) ( \
- ((pa) & local_cpu_data->unimpl_pa_mask) != 0 \
-)
-
-/*
- * Handle an illegal operation fault that was caused by an
- * unimplemented "brl" instruction.
- * If we are not successful (e.g because the illegal operation
- * wasn't caused by a "brl" after all), we return -1.
- * If we are successful, we return either 0 or the address
- * of a "fixup" function for manipulating preserved register
- * state.
- */
-
-struct illegal_op_return
-ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
-{
- unsigned long bundle[2];
- unsigned long opcode, btype, qp, offset, cpl;
- unsigned long next_ip;
- struct illegal_op_return rv;
- long tmp_taken, unimplemented_address;
-
- rv.fkt = (unsigned long) -1;
-
- /*
- * Decode the instruction bundle.
- */
-
- if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle)))
- return rv;
-
- next_ip = (unsigned long) regs->cr_iip + 16;
-
- /* "brl" must be in slot 2. */
- if (ia64_psr(regs)->ri != 1) return rv;
-
- /* Must be "mlx" template */
- if ((bundle[0] & 0x1e) != 0x4) return rv;
-
- opcode = (bundle[1] >> 60);
- btype = ((bundle[1] >> 29) & 0x7);
- qp = ((bundle[1] >> 23) & 0x3f);
- offset = ((bundle[1] & 0x0800000000000000L) << 4)
- | ((bundle[1] & 0x00fffff000000000L) >> 32)
- | ((bundle[1] & 0x00000000007fffffL) << 40)
- | ((bundle[0] & 0xffff000000000000L) >> 24);
-
- tmp_taken = regs->pr & (1L << qp);
-
- switch(opcode) {
-
- case 0xC:
- /*
- * Long Branch.
- */
- if (btype != 0) return rv;
- rv.fkt = 0;
- if (!(tmp_taken)) {
- /*
- * Qualifying predicate is 0.
- * Skip instruction.
- */
- regs->cr_iip = next_ip;
- ia64_psr(regs)->ri = 0;
- return rv;
- }
- break;
-
- case 0xD:
- /*
- * Long Call.
- */
- rv.fkt = 0;
- if (!(tmp_taken)) {
- /*
- * Qualifying predicate is 0.
- * Skip instruction.
- */
- regs->cr_iip = next_ip;
- ia64_psr(regs)->ri = 0;
- return rv;
- }
-
- /*
- * BR[btype] = IP+16
- */
- switch(btype) {
- case 0:
- regs->b0 = next_ip;
- break;
- case 1:
- rv.fkt = (unsigned long) &ia64_set_b1;
- break;
- case 2:
- rv.fkt = (unsigned long) &ia64_set_b2;
- break;
- case 3:
- rv.fkt = (unsigned long) &ia64_set_b3;
- break;
- case 4:
- rv.fkt = (unsigned long) &ia64_set_b4;
- break;
- case 5:
- rv.fkt = (unsigned long) &ia64_set_b5;
- break;
- case 6:
- regs->b6 = next_ip;
- break;
- case 7:
- regs->b7 = next_ip;
- break;
- }
- rv.arg1 = next_ip;
-
- /*
- * AR[PFS].pfm = CFM
- * AR[PFS].pec = AR[EC]
- * AR[PFS].ppl = PSR.cpl
- */
- cpl = ia64_psr(regs)->cpl;
- regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
- | (ar_ec << 52) | (cpl << 62));
-
- /*
- * CFM.sof -= CFM.sol
- * CFM.sol = 0
- * CFM.sor = 0
- * CFM.rrb.gr = 0
- * CFM.rrb.fr = 0
- * CFM.rrb.pr = 0
- */
- regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f)
- - ((regs->cr_ifs >> 7) & 0x7f));
-
- break;
-
- default:
- /*
- * Unknown opcode.
- */
- return rv;
-
- }
-
- regs->cr_iip += offset;
- ia64_psr(regs)->ri = 0;
-
- if (ia64_psr(regs)->it == 0)
- unimplemented_address = unimplemented_physical_address(regs->cr_iip);
- else
- unimplemented_address = unimplemented_virtual_address(regs->cr_iip);
-
- if (unimplemented_address) {
- /*
- * The target address contains unimplemented bits.
- */
- printk(KERN_DEBUG "Woah! Unimplemented Instruction Address Trap!\n");
- force_sig_fault(SIGILL, ILL_BADIADDR, (void __user *)NULL,
- 0, 0, 0);
- } else if (ia64_psr(regs)->tb) {
- /*
- * Branch Tracing is enabled.
- * Force a taken branch signal.
- */
- force_sig_fault(SIGTRAP, TRAP_BRANCH, (void __user *)NULL,
- 0, 0, 0);
- } else if (ia64_psr(regs)->ss) {
- /*
- * Single Step is enabled.
- * Force a trace signal.
- */
- force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)NULL,
- 0, 0, 0);
- }
- return rv;
-}
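
To make the displacement reassembly in ia64_emulate_brl() easier to follow, here is the same bit-shuffling as a standalone sketch (masks and shifts copied from the code above): a brl's IP-relative displacement is scattered across both bundle words and must be stitched together before being added to cr_iip, and the low 4 bits stay zero because IA-64 bundles are 16 bytes.

	#include <stdint.h>

	/* Mirrors the offset computation in ia64_emulate_brl(). */
	static uint64_t brl_displacement(uint64_t bundle0, uint64_t bundle1)
	{
		return ((bundle1 & 0x0800000000000000UL) << 4)	/* bit 59 -> bit 63 */
		     | ((bundle1 & 0x00fffff000000000UL) >> 32)	/* bits 36-55 -> 4-23 */
		     | ((bundle1 & 0x00000000007fffffUL) << 40)	/* bits 0-22 -> 40-62 */
		     | ((bundle0 & 0xffff000000000000UL) >> 24);	/* bits 48-63 -> 24-39 */
	}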
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
deleted file mode 100644
index 88b3ce3e66..0000000000
--- a/arch/ia64/kernel/crash.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/ia64/kernel/crash.c
- *
- * Architecture specific (ia64) functions for kexec based crash dumps.
- *
- * Created by: Khalid Aziz <khalid.aziz@hp.com>
- * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
- * Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
- *
- */
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/crash_dump.h>
-#include <linux/memblock.h>
-#include <linux/kexec.h>
-#include <linux/elfcore.h>
-#include <linux/reboot.h>
-#include <linux/sysctl.h>
-#include <linux/init.h>
-#include <linux/kdebug.h>
-
-#include <asm/mca.h>
-
-int kdump_status[NR_CPUS];
-static atomic_t kdump_cpu_frozen;
-atomic_t kdump_in_progress;
-static int kdump_freeze_monarch;
-static int kdump_on_init = 1;
-static int kdump_on_fatal_mca = 1;
-
-extern void ia64_dump_cpu_regs(void *);
-
-static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
-
-void
-crash_save_this_cpu(void)
-{
- void *buf;
- unsigned long cfm, sof, sol;
-
- int cpu = smp_processor_id();
- struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
-
- elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
- memset(prstatus, 0, sizeof(*prstatus));
- prstatus->common.pr_pid = current->pid;
-
- ia64_dump_cpu_regs(dst);
- cfm = dst[43];
- sol = (cfm >> 7) & 0x7f;
- sof = cfm & 0x7f;
- dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
- sof - sol);
-
- buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
- if (!buf)
- return;
- buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
- sizeof(*prstatus));
- final_note(buf);
-}
-
-#ifdef CONFIG_SMP
-static int
-kdump_wait_cpu_freeze(void)
-{
- int cpu_num = num_online_cpus() - 1;
- int timeout = 1000;
- while(timeout-- > 0) {
- if (atomic_read(&kdump_cpu_frozen) == cpu_num)
- return 0;
- udelay(1000);
- }
- return 1;
-}
-#endif
-
-void
-machine_crash_shutdown(struct pt_regs *pt)
-{
- /* This function is only called after the system
- * has panicked or is otherwise in a critical state.
- * The minimum amount of code to allow a kexec'd kernel
- * to run successfully needs to happen here.
- *
- * In practice this means shooting down the other cpus in
- * an SMP system.
- */
- kexec_disable_iosapic();
-#ifdef CONFIG_SMP
- /*
- * If kdump_on_init is set and an INIT is asserted here, kdump will
- * be started again via INIT monarch.
- */
- local_irq_disable();
- ia64_set_psr_mc(); /* mask MCA/INIT */
- if (atomic_inc_return(&kdump_in_progress) != 1)
- unw_init_running(kdump_cpu_freeze, NULL);
-
-	/*
-	 * Now this cpu is ready for kdump.
-	 * Stop all others by IPI or INIT. They could receive INIT from
-	 * outside and might be INIT monarch, but the only thing they have
-	 * to do is fall into kdump_cpu_freeze().
-	 *
-	 * If an INIT is asserted here:
-	 * - All receivers might be slaves, since some of the cpus could
-	 *   already be frozen and INIT might be masked on the monarch.
-	 *   In this case, all slaves will be frozen soon since
-	 *   kdump_in_progress will let them into DIE_INIT_SLAVE_LEAVE.
-	 * - One might be a monarch, but the INIT rendezvous will fail since
-	 *   at least this cpu already has INIT masked, so it never joins
-	 *   the rendezvous. In this case, all slaves and the monarch will
-	 *   be frozen soon with no wait since the INIT rendezvous is skipped
-	 *   by kdump_in_progress.
-	 */
- kdump_smp_send_stop();
-	/* not all cpus respond to the IPI; send INIT to freeze them */
- if (kdump_wait_cpu_freeze()) {
- kdump_smp_send_init();
-		/* wait again; avoid going ahead with unfrozen cpus if possible */
- kdump_wait_cpu_freeze();
- }
-#endif
-}
-
-static void
-machine_kdump_on_init(void)
-{
- crash_save_vmcoreinfo();
- local_irq_disable();
- kexec_disable_iosapic();
- machine_kexec(ia64_kimage);
-}
-
-void
-kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
-{
- int cpuid;
-
- local_irq_disable();
- cpuid = smp_processor_id();
- crash_save_this_cpu();
- current->thread.ksp = (__u64)info->sw - 16;
-
- ia64_set_psr_mc(); /* mask MCA/INIT and stop reentrance */
-
- atomic_inc(&kdump_cpu_frozen);
- kdump_status[cpuid] = 1;
- mb();
- for (;;)
- cpu_relax();
-}
-
-static int
-kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
-{
- struct ia64_mca_notify_die *nd;
- struct die_args *args = data;
-
- if (atomic_read(&kdump_in_progress)) {
- switch (val) {
- case DIE_INIT_MONARCH_LEAVE:
- if (!kdump_freeze_monarch)
- break;
- fallthrough;
- case DIE_INIT_SLAVE_LEAVE:
- case DIE_INIT_MONARCH_ENTER:
- case DIE_MCA_RENDZVOUS_LEAVE:
- unw_init_running(kdump_cpu_freeze, NULL);
- break;
- }
- }
-
- if (!kdump_on_init && !kdump_on_fatal_mca)
- return NOTIFY_DONE;
-
- if (!ia64_kimage) {
- if (val == DIE_INIT_MONARCH_LEAVE)
- ia64_mca_printk(KERN_NOTICE
- "%s: kdump not configured\n",
- __func__);
- return NOTIFY_DONE;
- }
-
- if (val != DIE_INIT_MONARCH_LEAVE &&
- val != DIE_INIT_MONARCH_PROCESS &&
- val != DIE_MCA_MONARCH_LEAVE)
- return NOTIFY_DONE;
-
- nd = (struct ia64_mca_notify_die *)args->err;
-
- switch (val) {
- case DIE_INIT_MONARCH_PROCESS:
-		/* Reason code 1 means machine check rendezvous */
- if (kdump_on_init && (nd->sos->rv_rc != 1)) {
- if (atomic_inc_return(&kdump_in_progress) != 1)
- kdump_freeze_monarch = 1;
- }
- break;
- case DIE_INIT_MONARCH_LEAVE:
-		/* Reason code 1 means machine check rendezvous */
- if (kdump_on_init && (nd->sos->rv_rc != 1))
- machine_kdump_on_init();
- break;
- case DIE_MCA_MONARCH_LEAVE:
-		/* *(nd->data) indicates whether the MCA is recoverable */
- if (kdump_on_fatal_mca && !(*(nd->data))) {
- if (atomic_inc_return(&kdump_in_progress) == 1)
- machine_kdump_on_init();
- /* We got fatal MCA while kdump!? No way!! */
- }
- break;
- }
- return NOTIFY_DONE;
-}
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table kdump_ctl_table[] = {
- {
- .procname = "kdump_on_init",
- .data = &kdump_on_init,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "kdump_on_fatal_mca",
- .data = &kdump_on_fatal_mca,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif
-
-static int
-machine_crash_setup(void)
-{
- /* be notified before default_monarch_init_process */
- static struct notifier_block kdump_init_notifier_nb = {
- .notifier_call = kdump_init_notifier,
- .priority = 1,
- };
- int ret;
-	ret = register_die_notifier(&kdump_init_notifier_nb);
-	if (ret != 0)
- return ret;
-#ifdef CONFIG_SYSCTL
- register_sysctl("kernel", kdump_ctl_table);
-#endif
- return 0;
-}
-
-__initcall(machine_crash_setup);
-
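
Aside: the two sysctls registered by machine_crash_setup() above surface as
/proc/sys/kernel/kdump_on_init and /proc/sys/kernel/kdump_on_fatal_mca. A
minimal userspace sketch (hypothetical test program, assuming an ia64 kernel
that built this file) for turning kdump-on-INIT off:

	/* toggle the kdump_on_init knob registered by machine_crash_setup() */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/kdump_on_init", "w");

		if (!f) {
			perror("kdump_on_init");
			return 1;
		}
		fputs("0\n", f);	/* 0 = no kdump on INIT, 1 = default */
		return fclose(f) ? 1 : 0;
	}

Writing 1 restores the default; both knobs start out enabled, per the
initializers above.
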
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
deleted file mode 100644
index 4ef68e2aa7..0000000000
--- a/arch/ia64/kernel/crash_dump.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * kernel/crash_dump.c - Memory-preserving reboot related code.
- *
- * Created by: Simon Horman <horms@verge.net.au>
- * Original code moved from kernel/crash.c
- * Original code comment copied from the i386 version of this file
- */
-
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/crash_dump.h>
-#include <linux/uio.h>
-#include <asm/page.h>
-
-ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
- size_t csize, unsigned long offset)
-{
- void *vaddr;
-
- if (!csize)
- return 0;
- vaddr = __va(pfn<<PAGE_SHIFT);
- csize = copy_to_iter(vaddr + offset, csize, iter);
- return csize;
-}
-
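
copy_oldmem_page() is the arch hook behind /proc/vmcore for reading pages of
the crashed kernel: it resolves the old pfn through the identity mapping with
__va() and lets the iov_iter layer do the copy. A sketch of a caller
(hypothetical helper; the real consumer lives in fs/proc/vmcore.c), assuming
the 6.x iov_iter API with ITER_DEST and a kernel-space destination buffer:

	/* hypothetical kernel-side caller, feeding a kvec-backed iov_iter */
	#include <linux/crash_dump.h>
	#include <linux/uio.h>

	static ssize_t read_oldmem(void *dst, unsigned long pfn,
				   size_t len, unsigned long offset)
	{
		struct kvec kv = { .iov_base = dst, .iov_len = len };
		struct iov_iter iter;

		iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
		return copy_oldmem_page(&iter, pfn, len, offset);
	}
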
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
deleted file mode 100644
index 258d7b70c0..0000000000
--- a/arch/ia64/kernel/cyclone.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/timex.h>
-#include <linux/clocksource.h>
-#include <linux/io.h>
-
-/* IBM Summit (EXA) Cyclone counter code */
-#define CYCLONE_CBAR_ADDR 0xFEB00CD0
-#define CYCLONE_PMCC_OFFSET 0x51A0
-#define CYCLONE_MPMC_OFFSET 0x51D0
-#define CYCLONE_MPCS_OFFSET 0x51A8
-#define CYCLONE_TIMER_FREQ 100000000
-
-int use_cyclone;
-void __init cyclone_setup(void)
-{
- use_cyclone = 1;
-}
-
-static void __iomem *cyclone_mc;
-
-static u64 read_cyclone(struct clocksource *cs)
-{
- return (u64)readq((void __iomem *)cyclone_mc);
-}
-
-static struct clocksource clocksource_cyclone = {
- .name = "cyclone",
- .rating = 300,
- .read = read_cyclone,
- .mask = (1LL << 40) - 1,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-int __init init_cyclone_clock(void)
-{
- u64 __iomem *reg;
- u64 base; /* saved cyclone base address */
- u64 offset; /* offset from pageaddr to cyclone_timer register */
- int i;
- u32 __iomem *cyclone_timer; /* Cyclone MPMC0 register */
-
- if (!use_cyclone)
- return 0;
-
- printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
-
-	/* find base address */
-	offset = CYCLONE_CBAR_ADDR;
-	reg = ioremap(offset, sizeof(u64));
-	if (!reg) {
-		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
-				" register.\n");
-		use_cyclone = 0;
-		return -ENODEV;
-	}
-	base = readq(reg);
-	iounmap(reg);
-	if (!base) {
-		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
-				" value.\n");
-		use_cyclone = 0;
-		return -ENODEV;
-	}
-
-	/* setup PMCC */
-	offset = base + CYCLONE_PMCC_OFFSET;
-	reg = ioremap(offset, sizeof(u64));
-	if (!reg) {
-		printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
-				" register.\n");
-		use_cyclone = 0;
-		return -ENODEV;
-	}
-	writel(0x00000001, reg);
-	iounmap(reg);
-
-	/* setup MPCS */
-	offset = base + CYCLONE_MPCS_OFFSET;
-	reg = ioremap(offset, sizeof(u64));
-	if (!reg) {
-		printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
-				" register.\n");
-		use_cyclone = 0;
-		return -ENODEV;
-	}
-	writel(0x00000001, reg);
-	iounmap(reg);
-
-	/* map in cyclone_timer */
-	offset = base + CYCLONE_MPMC_OFFSET;
-	cyclone_timer = ioremap(offset, sizeof(u32));
-	if (!cyclone_timer) {
-		printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
-				" register.\n");
-		use_cyclone = 0;
-		return -ENODEV;
-	}
-
-	/* quick test to make sure it's ticking */
-	for (i = 0; i < 3; i++) {
-		u32 old = readl(cyclone_timer);
-		int stall = 100;
-
-		while (stall--)
-			barrier();
-		if (readl(cyclone_timer) == old) {
-			printk(KERN_ERR "Summit chipset: Counter not counting!"
-					" DISABLED\n");
-			iounmap(cyclone_timer);
-			cyclone_timer = NULL;
-			use_cyclone = 0;
-			return -ENODEV;
-		}
-	}
-	/* publish the counter address and register the clocksource */
- cyclone_mc = cyclone_timer;
- clocksource_cyclone.archdata.fsys_mmio = cyclone_timer;
- clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
-
- return 0;
-}
-
-__initcall(init_cyclone_clock);
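
The clocksource registered above is defined purely by its 40-bit mask and the
100 MHz CYCLONE_TIMER_FREQ; the timekeeping core copes with wraparound as long
as the counter is read more often than its wrap interval. A standalone
arithmetic sketch of that interval:

	/* wrap interval implied by .mask and CYCLONE_TIMER_FREQ above */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = (1ULL << 40) - 1;	/* .mask */
		uint64_t hz = 100000000;		/* CYCLONE_TIMER_FREQ */

		/* 2^40 / 1e8 ~= 10995 s, just over three hours */
		printf("wraps every %.0f s\n", (double)(mask + 1) / hz);
		return 0;
	}
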
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
deleted file mode 100644
index cd0c166bfb..0000000000
--- a/arch/ia64/kernel/dma-mapping.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-map-ops.h>
-#include <linux/export.h>
-
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
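
Note the asymmetry above: dma_ops carries an EXPORT_SYMBOL, so modules may
link against it, while iommu_detected does not and is visible to built-in code
only. A hypothetical built-in initcall (a sketch, assuming the extern
declarations from the ia64 headers are in scope) that reports both:

	/* hypothetical built-in initcall reporting the DMA setup above */
	#include <linux/dma-map-ops.h>
	#include <linux/init.h>
	#include <linux/printk.h>

	static int __init report_dma_setup(void)
	{
		pr_info("HW IOMMU %s, dma_ops %s\n",
			iommu_detected ? "present" : "absent",
			dma_ops ? "set" : "unset");
		return 0;
	}
	late_initcall(report_dma_setup);
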
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
deleted file mode 100644
index 033f5aead8..0000000000
--- a/arch/ia64/kernel/efi.c
+++ /dev/null
@@ -1,1360 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Extensible Firmware Interface
- *
- * Based on Extensible Firmware Interface Specification version 0.9
- * April 30, 1999
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * Not all EFI Runtime Services are implemented yet, as EFI only
- * supports physical mode addressing on SoftSDV. This is to be fixed
- * in a future version. --drummond 1999-07-20
- *
- * Implemented EFI runtime services and virtual mode calls. --davidm
- *
- * Goutham Rao: <goutham.rao@intel.com>
- * Skip non-WB memory and ignore empty memory ranges.
- */
-#include <linux/module.h>
-#include <linux/memblock.h>
-#include <linux/crash_dump.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/efi.h>
-#include <linux/kexec.h>
-#include <linux/mm.h>
-
-#include <asm/efi.h>
-#include <asm/io.h>
-#include <asm/kregs.h>
-#include <asm/meminit.h>
-#include <asm/processor.h>
-#include <asm/mca.h>
-#include <asm/sal.h>
-#include <asm/setup.h>
-#include <asm/tlbflush.h>
-
-#define EFI_DEBUG 0
-
-#define ESI_TABLE_GUID \
- EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
- 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
-
-static unsigned long mps_phys = EFI_INVALID_TABLE_ADDR;
-static __initdata unsigned long palo_phys;
-
-unsigned long __initdata esi_phys = EFI_INVALID_TABLE_ADDR;
-unsigned long hcdp_phys = EFI_INVALID_TABLE_ADDR;
-unsigned long sal_systab_phys = EFI_INVALID_TABLE_ADDR;
-
-static const efi_config_table_type_t arch_tables[] __initconst = {
- {ESI_TABLE_GUID, &esi_phys, "ESI" },
- {HCDP_TABLE_GUID, &hcdp_phys, "HCDP" },
- {MPS_TABLE_GUID, &mps_phys, "MPS" },
- {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, &palo_phys, "PALO" },
- {SAL_SYSTEM_TABLE_GUID, &sal_systab_phys, "SALsystab" },
- {},
-};
-
-extern efi_status_t efi_call_phys (void *, ...);
-
-static efi_runtime_services_t *runtime;
-static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
-
-#define efi_call_virt(f, args...) (*(f))(args)
-
-#define STUB_GET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_cap_t *atc = NULL; \
- efi_status_t ret; \
- \
- if (tc) \
- atc = adjust_arg(tc); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
- adjust_arg(tm), atc); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_time (efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
- adjust_arg(tm)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
- efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix( \
- (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
- adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_t *atm = NULL; \
- efi_status_t ret; \
- \
- if (tm) \
- atm = adjust_arg(tm); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix( \
- (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
- enabled, atm); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
- unsigned long *data_size, void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- u32 *aattr = NULL; \
- efi_status_t ret; \
- \
- if (attr) \
- aattr = adjust_arg(attr); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix( \
- (efi_get_variable_t *) __va(runtime->get_variable), \
- adjust_arg(name), adjust_arg(vendor), aattr, \
- adjust_arg(data_size), adjust_arg(data)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
- efi_guid_t *vendor) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix( \
- (efi_get_next_variable_t *) __va(runtime->get_next_variable), \
- adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
- u32 attr, unsigned long data_size, \
- void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix( \
- (efi_set_variable_t *) __va(runtime->set_variable), \
- adjust_arg(name), adjust_arg(vendor), attr, data_size, \
- adjust_arg(data)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_high_mono_count (u32 *count) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
- __va(runtime->get_next_high_mono_count), \
- adjust_arg(count)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
-static void \
-prefix##_reset_system (int reset_type, efi_status_t status, \
- unsigned long data_size, efi_char16_t *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_char16_t *adata = NULL; \
- \
- if (data) \
- adata = adjust_arg(data); \
- \
- ia64_save_scratch_fpregs(fr); \
- efi_call_##prefix( \
- (efi_reset_system_t *) __va(runtime->reset_system), \
- reset_type, status, data_size, adata); \
- /* should not return, but just in case... */ \
- ia64_load_scratch_fpregs(fr); \
-}
-
-#define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
-
-STUB_GET_TIME(phys, phys_ptr)
-STUB_SET_TIME(phys, phys_ptr)
-STUB_GET_WAKEUP_TIME(phys, phys_ptr)
-STUB_SET_WAKEUP_TIME(phys, phys_ptr)
-STUB_GET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
-STUB_SET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
-STUB_RESET_SYSTEM(phys, phys_ptr)
-
-#define id(arg) arg
-
-STUB_GET_TIME(virt, id)
-STUB_SET_TIME(virt, id)
-STUB_GET_WAKEUP_TIME(virt, id)
-STUB_SET_WAKEUP_TIME(virt, id)
-STUB_GET_VARIABLE(virt, id)
-STUB_GET_NEXT_VARIABLE(virt, id)
-STUB_SET_VARIABLE(virt, id)
-STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
-STUB_RESET_SYSTEM(virt, id)
-
-void
-efi_gettimeofday (struct timespec64 *ts)
-{
- efi_time_t tm;
-
- if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
- memset(ts, 0, sizeof(*ts));
- return;
- }
-
- ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
- tm.hour, tm.minute, tm.second);
- ts->tv_nsec = tm.nanosecond;
-}
-
-static int
-is_memory_available (efi_memory_desc_t *md)
-{
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
-
- switch (md->type) {
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- return 1;
- }
- return 0;
-}
-
-typedef struct kern_memdesc {
- u64 attribute;
- u64 start;
- u64 num_pages;
-} kern_memdesc_t;
-
-static kern_memdesc_t *kern_memmap;
-
-#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
- return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
- return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_UC);
-}
-
-static void
-walk (efi_freemem_callback_t callback, void *arg, u64 attr)
-{
- kern_memdesc_t *k;
- u64 start, end, voff;
-
- voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
- for (k = kern_memmap; k->start != ~0UL; k++) {
- if (k->attribute != attr)
- continue;
- start = PAGE_ALIGN(k->start);
- end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
- if (start < end)
- if ((*callback)(start + voff, end + voff, arg) < 0)
- return;
- }
-}
-
-/*
- * Walk the EFI memory map and call CALLBACK once for each EFI memory
- * descriptor that has memory that is available for OS use.
- */
-void
-efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_WB);
-}
-
-/*
- * Walk the EFI memory map and call CALLBACK once for each EFI memory
- * descriptor that has memory that is available for uncached allocator.
- */
-void
-efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_UC);
-}
-
-/*
- * Look for the PAL_CODE region reported by EFI and map it using an
- * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
- * Abstraction Layer chapter 11 in ADAG
- */
-void *
-efi_get_pal_addr (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- int pal_code_count = 0;
- u64 vaddr, mask;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type != EFI_PAL_CODE)
- continue;
-
- if (++pal_code_count > 1) {
- printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
- "dropped @ %llx\n", md->phys_addr);
- continue;
- }
- /*
- * The only ITLB entry in region 7 that is used is the one
- * installed by __start(). That entry covers a 64MB range.
- */
- mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
- vaddr = PAGE_OFFSET + md->phys_addr;
-
- /*
- * We must check that the PAL mapping won't overlap with the
- * kernel mapping.
- *
- * PAL code is guaranteed to be aligned on a power of 2 between
- * 4k and 256KB and that only one ITR is needed to map it. This
- * implies that the PAL code is always aligned on its size,
- * i.e., the closest matching page size supported by the TLB.
-		 * Therefore PAL code is guaranteed never to cross a 64MB boundary
-		 * unless it is bigger than 64MB (very unlikely!). So for now the
- * following test is enough to determine whether or not we need
- * a dedicated ITR for the PAL code.
- */
- if ((vaddr & mask) == (KERNEL_START & mask)) {
- printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
- __func__);
- continue;
- }
-
- if (efi_md_size(md) > IA64_GRANULE_SIZE)
- panic("Whoa! PAL code size bigger than a granule!");
-
-#if EFI_DEBUG
- mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
-
- printk(KERN_INFO "CPU %d: mapping PAL code "
- "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
- smp_processor_id(), md->phys_addr,
- md->phys_addr + efi_md_size(md),
- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
-#endif
- return __va(md->phys_addr);
- }
- printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
- __func__);
- return NULL;
-}
-
-
-static u8 __init palo_checksum(u8 *buffer, u32 length)
-{
- u8 sum = 0;
- u8 *end = buffer + length;
-
- while (buffer < end)
- sum = (u8) (sum + *(buffer++));
-
- return sum;
-}
-
-/*
- * Parse and handle PALO table which is published at:
- * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
- */
-static void __init handle_palo(unsigned long phys_addr)
-{
- struct palo_table *palo = __va(phys_addr);
- u8 checksum;
-
- if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
- printk(KERN_INFO "PALO signature incorrect.\n");
- return;
- }
-
- checksum = palo_checksum((u8 *)palo, palo->length);
- if (checksum) {
- printk(KERN_INFO "PALO checksum incorrect.\n");
- return;
- }
-
- setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
-}
-
-void
-efi_map_pal_code (void)
-{
- void *pal_vaddr = efi_get_pal_addr ();
- u64 psr;
-
- if (!pal_vaddr)
- return;
-
- /*
- * Cannot write to CRx with PSR.ic=1
- */
- psr = ia64_clear_ic();
- ia64_itr(0x1, IA64_TR_PALCODE,
- GRANULEROUNDDOWN((unsigned long) pal_vaddr),
- pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
- IA64_GRANULE_SHIFT);
- ia64_set_psr(psr); /* restore psr */
-}
-
-void __init
-efi_init (void)
-{
- const efi_system_table_t *efi_systab;
- void *efi_map_start, *efi_map_end;
- u64 efi_desc_size;
- char *cp;
-
- set_bit(EFI_BOOT, &efi.flags);
- set_bit(EFI_64BIT, &efi.flags);
-
- /*
- * It's too early to be able to use the standard kernel command line
- * support...
- */
- for (cp = boot_command_line; *cp; ) {
- if (memcmp(cp, "mem=", 4) == 0) {
- mem_limit = memparse(cp + 4, &cp);
- } else if (memcmp(cp, "max_addr=", 9) == 0) {
- max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else if (memcmp(cp, "min_addr=", 9) == 0) {
- min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
- if (min_addr != 0UL)
- printk(KERN_INFO "Ignoring memory below %lluMB\n",
- min_addr >> 20);
- if (max_addr != ~0UL)
- printk(KERN_INFO "Ignoring memory above %lluMB\n",
- max_addr >> 20);
-
- efi_systab = __va(ia64_boot_param->efi_systab);
-
- /*
- * Verify the EFI Table
- */
- if (efi_systab == NULL)
- panic("Whoa! Can't find EFI system table.\n");
- if (efi_systab_check_header(&efi_systab->hdr))
- panic("Whoa! EFI system table signature incorrect\n");
-
- efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
-
- palo_phys = EFI_INVALID_TABLE_ADDR;
-
- if (efi_config_parse_tables(__va(efi_systab->tables),
- efi_systab->nr_tables,
- arch_tables) != 0)
- return;
-
- if (palo_phys != EFI_INVALID_TABLE_ADDR)
- handle_palo(palo_phys);
-
- runtime = __va(efi_systab->runtime);
- efi.get_time = phys_get_time;
- efi.set_time = phys_set_time;
- efi.get_wakeup_time = phys_get_wakeup_time;
- efi.set_wakeup_time = phys_set_wakeup_time;
- efi.get_variable = phys_get_variable;
- efi.get_next_variable = phys_get_next_variable;
- efi.set_variable = phys_set_variable;
- efi.get_next_high_mono_count = phys_get_next_high_mono_count;
- efi.reset_system = phys_reset_system;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-#if EFI_DEBUG
- /* print EFI memory map: */
- {
- efi_memory_desc_t *md;
- void *p;
- unsigned int i;
-
- for (i = 0, p = efi_map_start; p < efi_map_end;
- ++i, p += efi_desc_size)
- {
- const char *unit;
- unsigned long size;
- char buf[64];
-
- md = p;
- size = md->num_pages << EFI_PAGE_SHIFT;
-
- if ((size >> 40) > 0) {
- size >>= 40;
- unit = "TB";
- } else if ((size >> 30) > 0) {
- size >>= 30;
- unit = "GB";
- } else if ((size >> 20) > 0) {
- size >>= 20;
- unit = "MB";
- } else {
- size >>= 10;
- unit = "KB";
- }
-
- printk("mem%02d: %s "
- "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
- i, efi_md_typeattr_format(buf, sizeof(buf), md),
- md->phys_addr,
- md->phys_addr + efi_md_size(md), size, unit);
- }
- }
-#endif
-
- efi_map_pal_code();
- efi_enter_virtual_mode();
-}
-
-void
-efi_enter_virtual_mode (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- efi_status_t status;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->attribute & EFI_MEMORY_RUNTIME) {
- /*
- * Some descriptors have multiple bits set, so the
- * order of the tests is relevant.
- */
- if (md->attribute & EFI_MEMORY_WB) {
- md->virt_addr = (u64) __va(md->phys_addr);
- } else if (md->attribute & EFI_MEMORY_UC) {
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
- } else if (md->attribute & EFI_MEMORY_WC) {
-#if 0
- md->virt_addr = ia64_remap(md->phys_addr,
- (_PAGE_A |
- _PAGE_P |
- _PAGE_D |
- _PAGE_MA_WC |
- _PAGE_PL_0 |
- _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- } else if (md->attribute & EFI_MEMORY_WT) {
-#if 0
- md->virt_addr = ia64_remap(md->phys_addr,
- (_PAGE_A |
- _PAGE_P |
- _PAGE_D |
- _PAGE_MA_WT |
- _PAGE_PL_0 |
- _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- }
- }
- }
-
- status = efi_call_phys(__va(runtime->set_virtual_address_map),
- ia64_boot_param->efi_memmap_size,
- efi_desc_size,
- ia64_boot_param->efi_memdesc_version,
- ia64_boot_param->efi_memmap);
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "warning: unable to switch EFI into "
- "virtual mode (status=%lu)\n", status);
- return;
- }
-
- set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-
- /*
- * Now that EFI is in virtual mode, we call the EFI functions more
- * efficiently:
- */
- efi.get_time = virt_get_time;
- efi.set_time = virt_set_time;
- efi.get_wakeup_time = virt_get_wakeup_time;
- efi.set_wakeup_time = virt_set_wakeup_time;
- efi.get_variable = virt_get_variable;
- efi.get_next_variable = virt_get_next_variable;
- efi.set_variable = virt_set_variable;
- efi.get_next_high_mono_count = virt_get_next_high_mono_count;
- efi.reset_system = virt_reset_system;
-}
-
-/*
- * Walk the EFI memory map looking for the I/O port range. There can only be
- * one entry of this type; other I/O port ranges should be described via ACPI.
- */
-u64
-efi_get_iobase (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
- if (md->attribute & EFI_MEMORY_UC)
- return md->phys_addr;
- }
- }
- return 0;
-}
-
-static struct kern_memdesc *
-kern_memory_descriptor (unsigned long phys_addr)
-{
- struct kern_memdesc *md;
-
- for (md = kern_memmap; md->start != ~0UL; md++) {
- if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return NULL;
-}
-
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (phys_addr - md->phys_addr < efi_md_size(md))
- return md;
- }
- return NULL;
-}
-
-static int
-efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- unsigned long end;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- end = phys_addr + size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->phys_addr < end && efi_md_end(md) > phys_addr)
- return 1;
- }
- return 0;
-}
-
-int
-efi_mem_type (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->type;
- return -EINVAL;
-}
-
-u64
-efi_mem_attributes (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->attribute;
- return 0;
-}
-EXPORT_SYMBOL(efi_mem_attributes);
-
-u64
-efi_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
- u64 attr;
-
- if (!md)
- return 0;
-
- /*
- * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
- * the kernel that firmware needs this region mapped.
- */
- attr = md->attribute & ~EFI_MEMORY_RUNTIME;
- do {
- unsigned long md_end = efi_md_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
- return 0;
- } while (md);
- return 0; /* never reached */
-}
-
-u64
-kern_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- struct kern_memdesc *md;
- u64 attr;
-
- /*
- * This is a hack for ioremap calls before we set up kern_memmap.
- * Maybe we should do efi_memmap_init() earlier instead.
- */
- if (!kern_memmap) {
- attr = efi_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return EFI_MEMORY_WB;
- return 0;
- }
-
- md = kern_memory_descriptor(phys_addr);
- if (!md)
- return 0;
-
- attr = md->attribute;
- do {
- unsigned long md_end = kmd_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = kern_memory_descriptor(md_end);
- if (!md || md->attribute != attr)
- return 0;
- } while (md);
- return 0; /* never reached */
-}
-
-int
-valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
-{
- u64 attr;
-
- /*
- * /dev/mem reads and writes use copy_to_user(), which implicitly
- * uses a granule-sized kernel identity mapping. It's really
- * only safe to do this for regions in kern_memmap. For more
- * details, see Documentation/arch/ia64/aliasing.rst.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
- return 1;
- return 0;
-}
-
-int
-valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
-{
- unsigned long phys_addr = pfn << PAGE_SHIFT;
- u64 attr;
-
- attr = efi_mem_attribute(phys_addr, size);
-
- /*
- * /dev/mem mmap uses normal user pages, so we don't need the entire
- * granule, but the entire region we're mapping must support the same
- * attribute.
- */
- if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
- return 1;
-
- /*
- * Intel firmware doesn't tell us about all the MMIO regions, so
- * in general we have to allow mmap requests. But if EFI *does*
- * tell us about anything inside this region, we should deny it.
- * The user can always map a smaller region to avoid the overlap.
- */
- if (efi_memmap_intersects(phys_addr, size))
- return 0;
-
- return 1;
-}
-
-pgprot_t
-phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
- pgprot_t vma_prot)
-{
- unsigned long phys_addr = pfn << PAGE_SHIFT;
- u64 attr;
-
- /*
- * For /dev/mem mmap, we use user mappings, but if the region is
- * in kern_memmap (and hence may be covered by a kernel mapping),
- * we must use the same attribute as the kernel mapping.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
- else if (attr & EFI_MEMORY_UC)
- return pgprot_noncached(vma_prot);
-
- /*
- * Some chipsets don't support UC access to memory. If
- * WB is supported, we prefer that.
- */
- if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
-
- return pgprot_noncached(vma_prot);
-}
-
-int __init
-efi_uart_console_only(void)
-{
- efi_status_t status;
- char *s, name[] = "ConOut";
- efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
- efi_char16_t *utf16, name_utf16[32];
- unsigned char data[1024];
- unsigned long size = sizeof(data);
- struct efi_generic_dev_path *hdr, *end_addr;
- int uart = 0;
-
- /* Convert to UTF-16 */
- utf16 = name_utf16;
- s = name;
- while (*s)
- *utf16++ = *s++ & 0x7f;
- *utf16 = 0;
-
- status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
- if (status != EFI_SUCCESS) {
- printk(KERN_ERR "No EFI %s variable?\n", name);
- return 0;
- }
-
- hdr = (struct efi_generic_dev_path *) data;
- end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
- while (hdr < end_addr) {
- if (hdr->type == EFI_DEV_MSG &&
- hdr->sub_type == EFI_DEV_MSG_UART)
- uart = 1;
- else if (hdr->type == EFI_DEV_END_PATH ||
- hdr->type == EFI_DEV_END_PATH2) {
- if (!uart)
- return 0;
- if (hdr->sub_type == EFI_DEV_END_ENTIRE)
- return 1;
- uart = 0;
- }
- hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
- }
- printk(KERN_ERR "Malformed %s value\n", name);
- return 0;
-}
-
-/*
- * Look for the first granule-aligned memory descriptor that is big
- * enough to hold the EFI memory map. Make sure this descriptor is at
- * least granule sized so it does not get trimmed.
- */
-struct kern_memdesc *
-find_memmap_space (void)
-{
- u64 contig_low=0, contig_high=0;
- u64 as = 0, ae;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 space_needed, efi_desc_size;
- unsigned long total_mem = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- /*
- * Worst case: we need 3 kernel descriptors for each efi descriptor
- * (if every entry has a WB part in the middle, and UC head and tail),
- * plus one for the end marker.
- */
- space_needed = sizeof(kern_memdesc_t) *
- (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
- continue;
- }
- if (pmd == NULL || !efi_wb(pmd) ||
- efi_md_end(pmd) != md->phys_addr) {
- contig_low = GRANULEROUNDUP(md->phys_addr);
- contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end;
- q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
- continue;
-
- /* Round ends inward to granule boundaries */
- as = max(contig_low, md->phys_addr);
- ae = min(contig_high, efi_md_end(md));
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
-
- if (ae - as > space_needed)
- break;
- }
- if (p >= efi_map_end)
- panic("Can't allocate space for kernel memory descriptors");
-
- return __va(as);
-}
-
-/*
- * Walk the EFI memory map and gather all memory available for the
- * kernel to use. We can allocate partial granules only if the
- * unavailable parts exist, and are WB.
- */
-unsigned long
-efi_memmap_init(u64 *s, u64 *e)
-{
- struct kern_memdesc *k, *prev = NULL;
- u64 contig_low=0, contig_high=0;
- u64 as, ae, lim;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 efi_desc_size;
- unsigned long total_mem = 0;
-
- k = kern_memmap = find_memmap_space();
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
- if (efi_uc(md) &&
- (md->type == EFI_CONVENTIONAL_MEMORY ||
- md->type == EFI_BOOT_SERVICES_DATA)) {
- k->attribute = EFI_MEMORY_UC;
- k->start = md->phys_addr;
- k->num_pages = md->num_pages;
- k++;
- }
- continue;
- }
- if (pmd == NULL || !efi_wb(pmd) ||
- efi_md_end(pmd) != md->phys_addr) {
- contig_low = GRANULEROUNDUP(md->phys_addr);
- contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end;
- q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md))
- continue;
-
- /*
-		 * Round ends inward to granule boundaries.
-		 * Give the trimmings to the uncached allocator.
- */
- if (md->phys_addr < contig_low) {
- lim = min(efi_md_end(md), contig_low);
- if (efi_uc(md)) {
- if (k > kern_memmap &&
- (k-1)->attribute == EFI_MEMORY_UC &&
- kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages +=
- (lim - md->phys_addr)
- >> EFI_PAGE_SHIFT;
- } else {
- k->attribute = EFI_MEMORY_UC;
- k->start = md->phys_addr;
- k->num_pages = (lim - md->phys_addr)
- >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- as = contig_low;
- } else
- as = md->phys_addr;
-
- if (efi_md_end(md) > contig_high) {
- lim = max(md->phys_addr, contig_high);
- if (efi_uc(md)) {
- if (lim == md->phys_addr && k > kern_memmap &&
- (k-1)->attribute == EFI_MEMORY_UC &&
- kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages += md->num_pages;
- } else {
- k->attribute = EFI_MEMORY_UC;
- k->start = lim;
- k->num_pages = (efi_md_end(md) - lim)
- >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- ae = contig_high;
- } else
- ae = efi_md_end(md);
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
- if (prev && kmd_end(prev) == md->phys_addr) {
- prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- continue;
- }
- k->attribute = EFI_MEMORY_WB;
- k->start = as;
- k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- prev = k++;
- }
- k->start = ~0L; /* end-marker */
-
- /* reserve the memory we are using for kern_memmap */
- *s = (u64)kern_memmap;
- *e = (u64)++k;
-
- return total_mem;
-}
-
-void
-efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource,
- struct resource *bss_resource)
-{
- struct resource *res;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- char *name;
- unsigned long flags, desc;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- res = NULL;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (md->num_pages == 0) /* should not happen */
- continue;
-
- flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- desc = IORES_DESC_NONE;
-
- switch (md->type) {
-
- case EFI_MEMORY_MAPPED_IO:
- case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
- continue;
-
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_CONVENTIONAL_MEMORY:
- if (md->attribute & EFI_MEMORY_WP) {
- name = "System ROM";
- flags |= IORESOURCE_READONLY;
- } else if (md->attribute == EFI_MEMORY_UC) {
- name = "Uncached RAM";
- } else {
- name = "System RAM";
- flags |= IORESOURCE_SYSRAM;
- }
- break;
-
- case EFI_ACPI_MEMORY_NVS:
- name = "ACPI Non-volatile Storage";
- desc = IORES_DESC_ACPI_NV_STORAGE;
- break;
-
- case EFI_UNUSABLE_MEMORY:
- name = "reserved";
- flags |= IORESOURCE_DISABLED;
- break;
-
- case EFI_PERSISTENT_MEMORY:
- name = "Persistent Memory";
- desc = IORES_DESC_PERSISTENT_MEMORY;
- break;
-
- case EFI_RESERVED_TYPE:
- case EFI_RUNTIME_SERVICES_CODE:
- case EFI_RUNTIME_SERVICES_DATA:
- case EFI_ACPI_RECLAIM_MEMORY:
- default:
- name = "reserved";
- break;
- }
-
- if ((res = kzalloc(sizeof(struct resource),
- GFP_KERNEL)) == NULL) {
- printk(KERN_ERR
- "failed to allocate resource for iomem\n");
- return;
- }
-
- res->name = name;
- res->start = md->phys_addr;
- res->end = md->phys_addr + efi_md_size(md) - 1;
- res->flags = flags;
- res->desc = desc;
-
- if (insert_resource(&iomem_resource, res) < 0)
- kfree(res);
- else {
- /*
- * We don't know which region contains
- * kernel data so we try it repeatedly and
- * let the resource manager test it.
- */
- insert_resource(res, code_resource);
- insert_resource(res, data_resource);
- insert_resource(res, bss_resource);
-#ifdef CONFIG_KEXEC
- insert_resource(res, &efi_memmap_res);
- insert_resource(res, &boot_param_res);
- if (crashk_res.end > crashk_res.start)
- insert_resource(res, &crashk_res);
-#endif
- }
- }
-}
-
-#ifdef CONFIG_KEXEC
-/*
- * Find a block of memory aligned to 64M, excluding reserved regions;
- * rsvd_regions are sorted.
- */
-unsigned long __init
-kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
-{
- int i;
- u64 start, end;
- u64 alignment = 1UL << _PAGE_SIZE_64M;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (!efi_wb(md))
- continue;
- start = ALIGN(md->phys_addr, alignment);
- end = efi_md_end(md);
- for (i = 0; i < n; i++) {
- if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
- if (__pa(r[i].start) > start + size)
- return start;
- start = ALIGN(__pa(r[i].end), alignment);
- if (i < n-1 &&
- __pa(r[i+1].start) < start + size)
- continue;
- else
- break;
- }
- }
- if (end > start + size)
- return start;
- }
-
- printk(KERN_WARNING
- "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
- return ~0UL;
-}
-#endif
-
-#ifdef CONFIG_CRASH_DUMP
-/* locate the size of the descriptor at a certain address */
-unsigned long __init
-vmcore_find_descriptor_size (unsigned long address)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- unsigned long ret = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (efi_wb(md) && md->type == EFI_LOADER_DATA
- && md->phys_addr == address) {
- ret = efi_md_size(md);
- break;
- }
- }
-
- if (ret == 0)
- printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
-
- return ret;
-}
-#endif
-
-char *efi_systab_show_arch(char *str)
-{
- if (mps_phys != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "MPS=0x%lx\n", mps_phys);
- if (hcdp_phys != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "HCDP=0x%lx\n", hcdp_phys);
- return str;
-}
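
A note on the STUB_* macros near the top of this file: each runtime service
gets two trampolines, a phys_* flavor whose adjust_arg converts every pointer
with ia64_tpa() (phys_ptr) and a virt_* flavor whose adjust_arg is the
identity (id); efi_init() installs the phys_* set and efi_enter_virtual_mode()
swaps in the virt_* set. Expanded by hand, STUB_GET_TIME(phys, phys_ptr)
comes out to roughly:

	static efi_status_t
	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
	{
		struct ia64_fpreg fr[6];
		efi_time_cap_t *atc = NULL;
		efi_status_t ret;

		if (tc)
			atc = (efi_time_cap_t *) ia64_tpa(tc);
		ia64_save_scratch_fpregs(fr);
		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
				    (efi_time_t *) ia64_tpa(tm), atc);
		ia64_load_scratch_fpregs(fr);
		return ret;
	}
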
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
deleted file mode 100644
index 1fd61b78fb..0000000000
--- a/arch/ia64/kernel/efi_stub.S
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * EFI call stub.
- *
- * Copyright (C) 1999-2001 Hewlett-Packard Co
- * David Mosberger <davidm@hpl.hp.com>
- *
- * This stub allows us to make EFI calls in physical mode with interrupts
- * turned off. We need this because we can't call SetVirtualAddressMap() until
- * the kernel has booted far enough to allow allocation of struct vm_area_struct
- * entries (which we would need to map stuff with memory attributes other
- * than uncached or writeback...). Since the GetTime() service gets called
- * earlier than that, we need to be able to make physical mode EFI calls from
- * the kernel.
- */
-
-/*
- * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
- * Abstraction Layer Specification", revision 2.6e). Note that
- * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
- * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
- * (the br.ia instruction fails unless psr.dfl and psr.dfh are
- * cleared). Fortunately, SAL promises not to touch the floating
- * point regs, so at least we don't have to save f2-f127.
- */
-#define PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PSR_BITS_TO_SET \
- (IA64_PSR_BN)
-
-#include <asm/processor.h>
-#include <asm/asmmacro.h>
-
-/*
- * Inputs:
- * in0 = address of function descriptor of EFI routine to call
- * in1..in7 = arguments to routine
- *
- * Outputs:
- * r8 = EFI_STATUS returned by called function
- */
-
-GLOBAL_ENTRY(efi_call_phys)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,7,7,0
- ld8 r2=[in0],8 // load EFI function's entry point
- mov loc0=rp
- .body
- ;;
- mov loc2=gp // save global pointer
- mov loc4=ar.rsc // save RSE configuration
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- ;;
- ld8 gp=[in0] // load EFI function's global pointer
- movl r16=PSR_BITS_TO_CLEAR
- mov loc3=psr // save processor status word
- movl r17=PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17
- mov b6=r2
- ;;
- andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode_phys
-.ret0: mov out4=in5
- mov out0=in1
- mov out1=in2
- mov out2=in3
- mov out3=in4
- mov out5=in6
- mov out6=in7
- mov loc5=r19
- mov loc6=r20
- br.call.sptk.many rp=b6 // call the EFI function
-.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret2: mov ar.rsc=loc4 // restore RSE configuration
- mov ar.pfs=loc1
- mov rp=loc0
- mov gp=loc2
- br.ret.sptk.many rp
-END(efi_call_phys)
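
The first two loads in the stub (ld8 r2=[in0],8 followed by ld8 gp=[in0])
decode an ia64 function descriptor: in0 points at a two-word record holding
the entry point and the callee's global pointer, which is why the caller's gp
must be saved in loc2 and restored on the way out. An illustrative C view of
that record (a sketch, not a declaration taken from the headers):

	#include <linux/types.h>

	struct ia64_fdesc {	/* what in0 points at */
		u64 ip;		/* entry point, branched to through b6 */
		u64 gp;		/* callee's global pointer */
	};
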
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
deleted file mode 100644
index 8895df1215..0000000000
--- a/arch/ia64/kernel/elfcore.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/coredump.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-
-#include <asm/elf.h>
-
-
-Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
-{
- return GATE_EHDR->e_phnum;
-}
-
-int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
- const struct elf_phdr *const gate_phdrs =
- (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
- int i;
- Elf64_Off ofs = 0;
-
- for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
- struct elf_phdr phdr = gate_phdrs[i];
-
- if (phdr.p_type == PT_LOAD) {
- phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);
- phdr.p_filesz = phdr.p_memsz;
- if (ofs == 0) {
- ofs = phdr.p_offset = offset;
- offset += phdr.p_filesz;
- } else {
- phdr.p_offset = ofs;
- }
- } else {
- phdr.p_offset += ofs;
- }
- phdr.p_paddr = 0; /* match other core phdrs */
- if (!dump_emit(cprm, &phdr, sizeof(phdr)))
- return 0;
- }
- return 1;
-}
-
-int elf_core_write_extra_data(struct coredump_params *cprm)
-{
- const struct elf_phdr *const gate_phdrs =
- (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
- int i;
-
- for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
- if (gate_phdrs[i].p_type == PT_LOAD) {
- void *addr = (void *)gate_phdrs[i].p_vaddr;
- size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
-
- if (!dump_emit(cprm, addr, memsz))
- return 0;
- break;
- }
- }
- return 1;
-}
-
-size_t elf_core_extra_data_size(struct coredump_params *cprm)
-{
- const struct elf_phdr *const gate_phdrs =
- (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
- int i;
- size_t size = 0;
-
- for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
- if (gate_phdrs[i].p_type == PT_LOAD) {
- size += PAGE_ALIGN(gate_phdrs[i].p_memsz);
- break;
- }
- }
- return size;
-}
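
The last three helpers above all walk the gate DSO's program-header table, and
the data-emitting ones key off its single PT_LOAD segment (hence the break
after the first match). A userspace sketch of the same walk (hypothetical,
standard <elf.h> types) computing the size that elf_core_extra_data_size()
reports:

	#include <elf.h>
	#include <stddef.h>

	/* mirror of elf_core_extra_data_size(): page-aligned size of the
	 * first (and only) PT_LOAD segment in a gate-style ELF image */
	static size_t gate_extra_size(const Elf64_Ehdr *eh, size_t pagesz)
	{
		const Elf64_Phdr *ph =
			(const Elf64_Phdr *)((const char *)eh + eh->e_phoff);
		size_t i;

		for (i = 0; i < eh->e_phnum; i++)
			if (ph[i].p_type == PT_LOAD)
				return (ph[i].p_memsz + pagesz - 1) &
				       ~(pagesz - 1);
		return 0;
	}
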
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
deleted file mode 100644
index ac06d44b9b..0000000000
--- a/arch/ia64/kernel/entry.S
+++ /dev/null
@@ -1,1427 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/ia64/kernel/entry.S
- *
- * Kernel entry points.
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999, 2002-2003
- * Asit Mallick <Asit.K.Mallick@intel.com>
- * Don Dugger <Don.Dugger@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- */
-/*
- * ia64_switch_to now places the correct virtual mapping in TR2 for the
- * kernel stack. This allows us to handle interrupts without changing
- * to physical mode.
- *
- * Jonathan Nicklin <nicklin@missioncriticallinux.com>
- * Patrick O'Rourke <orourke@missioncriticallinux.com>
- * 11/07/2000
- */
-/*
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * pv_ops.
- */
-/*
- * Global (preserved) predicate usage on syscall entry/exit path:
- *
- * pKStk: See entry.h.
- * pUStk: See entry.h.
- * pSys: See entry.h.
- * pNonSys: !pSys
- */
-
-#include <linux/export.h>
-#include <linux/pgtable.h>
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/kregs.h>
-#include <asm/asm-offsets.h>
-#include <asm/percpu.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-#include <asm/ftrace.h>
-
-#include "minstate.h"
-
- /*
- * execve() is special because in case of success, we need to
-	 * set up a null register window frame.
- */
-ENTRY(ia64_execve)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,3,0
- mov loc0=rp
- .body
- mov out0=in0 // filename
- ;; // stop bit between alloc and call
- mov out1=in1 // argv
- mov out2=in2 // envp
- br.call.sptk.many rp=sys_execve
-.ret0:
- cmp4.ge p6,p7=r8,r0
- mov ar.pfs=loc1 // restore ar.pfs
- sxt4 r8=r8 // return 64-bit result
- ;;
- stf.spill [sp]=f0
- mov rp=loc0
-(p6) mov ar.pfs=r0 // clear ar.pfs on success
-(p7) br.ret.sptk.many rp
-
- /*
-	 * In theory, we'd have to zap this state only to prevent leaking of
-	 * security-sensitive state (e.g., if current->mm->dumpable is zero). However,
-	 * this executes in less than 20 cycles even on Itanium, so it's not worth
-	 * optimizing for.
- */
- mov ar.unat=0; mov ar.lc=0
- mov r4=0; mov f2=f0; mov b1=r0
- mov r5=0; mov f3=f0; mov b2=r0
- mov r6=0; mov f4=f0; mov b3=r0
- mov r7=0; mov f5=f0; mov b4=r0
- ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
- ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
- ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
- ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
- ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
- ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
- ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
- br.ret.sptk.many rp
-END(ia64_execve)
-
-/*
- * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
- * u64 tls)
- */
-GLOBAL_ENTRY(sys_clone2)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across ia64_clone
- .body
- mov out0=in0
- mov out1=in1
- mov out2=in2
- mov out3=in3
- mov out4=in4
- mov out5=in5
- br.call.sptk.many rp=ia64_clone
-.ret1: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone2)
-
-/*
- * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
- * Deprecated. Use sys_clone2() instead.
- */
-GLOBAL_ENTRY(sys_clone)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across ia64_clone
- .body
- mov out0=in0
- mov out1=in1
- mov out2=16 // stacksize (compensates for 16-byte scratch area)
- mov out3=in3
- mov out4=in4
- mov out5=in5
- br.call.sptk.many rp=ia64_clone
-.ret2: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone)
-
-/*
- * prev_task <- ia64_switch_to(struct task_struct *next)
- * With Ingo's new scheduler, interrupts are disabled when this routine gets
- * called. The code starting at .map relies on this. The rest of the code
- * doesn't care about the interrupt masking status.
- */
-GLOBAL_ENTRY(ia64_switch_to)
- .prologue
- alloc r16=ar.pfs,1,0,0,0
- DO_SAVE_SWITCH_STACK
- .body
-
- adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
- movl r25=init_task
- mov r27=IA64_KR(CURRENT_STACK)
- adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
- dep r20=0,in0,61,3 // physical address of "next"
- ;;
- st8 [r22]=sp // save kernel stack pointer of old task
- shr.u r26=r20,IA64_GRANULE_SHIFT
- cmp.eq p7,p6=r25,in0
- ;;
- /*
- * If we've already mapped this task's page, we can skip doing it again.
- */
-(p6) cmp.eq p7,p6=r26,r27
-(p6) br.cond.dpnt .map
- ;;
-.done:
- ld8 sp=[r21] // load kernel stack pointer of new task
- MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register
- mov r8=r13 // return pointer to previously running task
- mov r13=in0 // set "current" pointer
- ;;
- DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
- sync.i // ensure "fc"s done by this CPU are visible on other CPUs
-#endif
- br.ret.sptk.many rp // boogie on out in new context
-
-.map:
- RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here
- movl r25=PAGE_KERNEL
- ;;
- srlz.d
- or r23=r25,r20 // construct PA | page properties
- mov r25=IA64_GRANULE_SHIFT<<2
- ;;
- MOV_TO_ITIR(p0, r25, r8)
- MOV_TO_IFA(in0, r8) // VA of next task...
- ;;
- mov r25=IA64_TR_CURRENT_STACK
- MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped...
- ;;
- itr.d dtr[r25]=r23 // wire in new mapping...
- SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit
- br.cond.sptk .done
-END(ia64_switch_to)
-
-/*
- * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
- * means that we may get an interrupt with "sp" pointing to the new kernel stack while
- * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
- * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
- * problem. Also, we don't need to specify unwind information for preserved registers
- * that are not modified in save_switch_stack as the right unwind information is already
- * specified at the call-site of save_switch_stack.
- */
-
-/*
- * save_switch_stack:
- * - r16 holds ar.pfs
- * - b7 holds address to return to
- * - rp (b0) holds return address to save
- */
-GLOBAL_ENTRY(save_switch_stack)
- .prologue
- .altrp b7
- flushrs // flush dirty regs to backing store (must be first in insn group)
- .save @priunat,r17
- mov r17=ar.unat // preserve caller's
- .body
-#ifdef CONFIG_ITANIUM
- adds r2=16+128,sp
- adds r3=16+64,sp
- adds r14=SW(R4)+16,sp
- ;;
- st8.spill [r14]=r4,16 // spill r4
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl.nt1 [r2],128
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl [r2]
- lfetch.fault.excl [r3]
- adds r15=SW(R5)+16,sp
-#else
- add r2=16+3*128,sp
- add r3=16,sp
- add r14=SW(R4)+16,sp
- ;;
- st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010
- ;;
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090
- lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190
- ;;
- lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110
- lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210
- adds r15=SW(R5)+16,sp
-#endif
- ;;
- st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
- mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
- add r2=SW(F2)+16,sp // r2 = &sw->f2
- ;;
- st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
- mov.m r18=ar.fpsr // preserve fpsr
- add r3=SW(F3)+16,sp // r3 = &sw->f3
- ;;
- stf.spill [r2]=f2,32
- mov.m r19=ar.rnat
- mov r21=b0
-
- stf.spill [r3]=f3,32
- st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
- mov r22=b1
- ;;
- // since we're done with the spills, read and save ar.unat:
- mov.m r29=ar.unat
- mov.m r20=ar.bspstore
- mov r23=b2
- stf.spill [r2]=f4,32
- stf.spill [r3]=f5,32
- mov r24=b3
- ;;
- st8 [r14]=r21,SW(B1)-SW(B0) // save b0
- st8 [r15]=r23,SW(B3)-SW(B2) // save b2
- mov r25=b4
- mov r26=b5
- ;;
- st8 [r14]=r22,SW(B4)-SW(B1) // save b1
- st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
- mov r21=ar.lc // I-unit
- stf.spill [r2]=f12,32
- stf.spill [r3]=f13,32
- ;;
- st8 [r14]=r25,SW(B5)-SW(B4) // save b4
- st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
- stf.spill [r2]=f14,32
- stf.spill [r3]=f15,32
- ;;
- st8 [r14]=r26 // save b5
- st8 [r15]=r21 // save ar.lc
- stf.spill [r2]=f16,32
- stf.spill [r3]=f17,32
- ;;
- stf.spill [r2]=f18,32
- stf.spill [r3]=f19,32
- ;;
- stf.spill [r2]=f20,32
- stf.spill [r3]=f21,32
- ;;
- stf.spill [r2]=f22,32
- stf.spill [r3]=f23,32
- ;;
- stf.spill [r2]=f24,32
- stf.spill [r3]=f25,32
- ;;
- stf.spill [r2]=f26,32
- stf.spill [r3]=f27,32
- ;;
- stf.spill [r2]=f28,32
- stf.spill [r3]=f29,32
- ;;
- stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
- stf.spill [r3]=f31,SW(PR)-SW(F31)
- add r14=SW(CALLER_UNAT)+16,sp
- ;;
- st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
- st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
- mov r21=pr
- ;;
- st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
- st8 [r3]=r21 // save predicate registers
- ;;
- st8 [r2]=r20 // save ar.bspstore
- st8 [r14]=r18 // save fpsr
- mov ar.rsc=3 // put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(save_switch_stack)
-
-/*
- * load_switch_stack:
- * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
- * - b7 holds address to return to
- * - must not touch r8-r11
- */
-GLOBAL_ENTRY(load_switch_stack)
- .prologue
- .altrp b7
-
- .body
- lfetch.fault.nt1 [sp]
- adds r2=SW(AR_BSPSTORE)+16,sp
- adds r3=SW(AR_UNAT)+16,sp
- mov ar.rsc=0 // put RSE into enforced lazy mode
- adds r14=SW(CALLER_UNAT)+16,sp
- adds r15=SW(AR_FPSR)+16,sp
- ;;
- ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
- ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
- ;;
- ld8 r21=[r2],16 // restore b0
- ld8 r22=[r3],16 // restore b1
- ;;
- ld8 r23=[r2],16 // restore b2
- ld8 r24=[r3],16 // restore b3
- ;;
- ld8 r25=[r2],16 // restore b4
- ld8 r26=[r3],16 // restore b5
- ;;
- ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
- ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
- ;;
- ld8 r28=[r2] // restore pr
- ld8 r30=[r3] // restore rnat
- ;;
- ld8 r18=[r14],16 // restore caller's unat
- ld8 r19=[r15],24 // restore fpsr
- ;;
- ldf.fill f2=[r14],32
- ldf.fill f3=[r15],32
- ;;
- ldf.fill f4=[r14],32
- ldf.fill f5=[r15],32
- ;;
- ldf.fill f12=[r14],32
- ldf.fill f13=[r15],32
- ;;
- ldf.fill f14=[r14],32
- ldf.fill f15=[r15],32
- ;;
- ldf.fill f16=[r14],32
- ldf.fill f17=[r15],32
- ;;
- ldf.fill f18=[r14],32
- ldf.fill f19=[r15],32
- mov b0=r21
- ;;
- ldf.fill f20=[r14],32
- ldf.fill f21=[r15],32
- mov b1=r22
- ;;
- ldf.fill f22=[r14],32
- ldf.fill f23=[r15],32
- mov b2=r23
- ;;
- mov ar.bspstore=r27
- mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
- mov b3=r24
- ;;
- ldf.fill f24=[r14],32
- ldf.fill f25=[r15],32
- mov b4=r25
- ;;
- ldf.fill f26=[r14],32
- ldf.fill f27=[r15],32
- mov b5=r26
- ;;
- ldf.fill f28=[r14],32
- ldf.fill f29=[r15],32
- mov ar.pfs=r16
- ;;
- ldf.fill f30=[r14],32
- ldf.fill f31=[r15],24
- mov ar.lc=r17
- ;;
- ld8.fill r4=[r14],16
- ld8.fill r5=[r15],16
- mov pr=r28,-1
- ;;
- ld8.fill r6=[r14],16
- ld8.fill r7=[r15],16
-
- mov ar.unat=r18 // restore caller's unat
- mov ar.rnat=r30 // must restore after bspstore but before rsc!
- mov ar.fpsr=r19 // restore fpsr
- mov ar.rsc=3 // put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(load_switch_stack)
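-
-/*
- * For reference, the SW() offsets used above index into struct
- * switch_stack, which holds the preserved machine state saved across a
- * task switch: the caller's ar.unat and ar.fpsr, the preserved FP
- * registers f2-f5 and f12-f31, the preserved GRs r4-r7, the branch
- * registers b0-b5, plus ar.pfs, ar.lc, ar.unat (NaT bits for r4-r7),
- * ar.rnat, ar.bspstore and pr.
- */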
-
- /*
- * Invoke a system call, but do some tracing before and after the call.
- * We MUST preserve the current register frame throughout this routine
- * because some system calls (such as ia64_execve) directly
- * manipulate ar.pfs.
- */
-GLOBAL_ENTRY(ia64_trace_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * We need to preserve the scratch registers f6-f11 in case the system
- * call is sigreturn.
- */
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
- br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
- cmp.lt p6,p0=r8,r0 // check tracehook
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- mov r10=0
-(p6) br.cond.sptk strace_error // syscall failed ->
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- ldf.fill f6=[r16],32
- ldf.fill f7=[r17],32
- ;;
- ldf.fill f8=[r16],32
- ldf.fill f9=[r17],32
- ;;
- ldf.fill f10=[r16]
- ldf.fill f11=[r17]
- // the syscall number may have changed, so re-load it and re-calculate the
- // syscall entry-point:
- adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
- ;;
- ld8 r15=[r15]
- mov r3=NR_syscalls - 1
- ;;
- adds r15=-1024,r15
- movl r16=sys_call_table
- ;;
- shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
- cmp.leu p6,p7=r15,r3
- ;;
-(p6) ld8 r20=[r20] // load address of syscall entry point
-(p7) movl r20=sys_ni_syscall
- ;;
- mov b6=r20
- br.call.sptk.many rp=b6 // do the syscall
-.strace_check_retval:
- cmp.lt p6,p0=r8,r0 // syscall failed?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- mov r10=0
-(p6) br.cond.sptk strace_error // syscall failed ->
- ;; // avoid RAW on r10
-.strace_save_retval:
-.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
-.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
- br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3:
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-(pUStk) rsm psr.i // disable interrupts
- br.cond.sptk ia64_work_pending_syscall_end
-
-strace_error:
- ld8 r3=[r2] // load pt_regs.r8
- sub r9=0,r8 // negate return value to get errno value
- ;;
- cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
- adds r3=16,r2 // r3=&pt_regs.r10
- ;;
-(p6) mov r10=-1
-(p6) mov r8=r9
- br.cond.sptk .strace_save_retval
-END(ia64_trace_syscall)
-
- /*
- * When traced and returning from sigreturn, we invoke syscall_trace_leave but
- * then go straight to ia64_leave_kernel rather than ia64_leave_syscall.
- */
-GLOBAL_ENTRY(ia64_strace_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle. This is a workaround.
- */
- nop.m 0
- nop.i 0
- br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-}
-.ret4: br.cond.sptk ia64_leave_kernel
-END(ia64_strace_leave_kernel)
-
-ENTRY(call_payload)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
- /* call the kernel_thread payload; fn is in r4, arg in r5 */
- alloc loc1=ar.pfs,0,3,1,0
- mov loc0=rp
- mov loc2=gp
- mov out0=r5 // arg
- ld8 r14 = [r4], 8 // fn.address
- ;;
- mov b6 = r14
- ld8 gp = [r4] // fn.gp
- ;;
- br.call.sptk.many rp=b6 // fn(arg)
-.ret12: mov gp=loc2
- mov rp=loc0
- mov ar.pfs=loc1
- /* ... and if it has returned, we are going to userland */
- cmp.ne pKStk,pUStk=r0,r0
- br.ret.sptk.many rp
-END(call_payload)
-
-GLOBAL_ENTRY(ia64_ret_from_clone)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle. This is a workaround.
- */
- nop.m 0
- nop.i 0
- /*
- * We need to call schedule_tail() to complete the scheduling process.
- * Called by ia64_switch_to() after ia64_clone()->copy_thread(). r8 contains the
- * address of the previously executing task.
- */
- br.call.sptk.many rp=ia64_invoke_schedule_tail
-}
-.ret8:
-(pKStk) br.call.sptk.many rp=call_payload
- adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
- ld4 r2=[r2]
- ;;
- mov r8=0
- and r2=_TIF_SYSCALL_TRACEAUDIT,r2
- ;;
- cmp.ne p6,p0=r2,r0
-(p6) br.cond.spnt .strace_check_retval
- ;; // added stop bits to prevent r8 dependency
-END(ia64_ret_from_clone)
- // fall through
-GLOBAL_ENTRY(ia64_ret_from_syscall)
- PT_REGS_UNWIND_INFO(0)
- cmp.ge p6,p7=r8,r0 // syscall executed successfully?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- mov r10=r0 // clear error indication in r10
-(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
-END(ia64_ret_from_syscall)
- // fall through
-
-/*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- * need to switch to bank 0 and doesn't restore the scratch registers.
- * To avoid leaking kernel bits, the scratch registers are set to
- * the following known-to-be-safe values:
- *
- * r1: restored (global pointer)
- * r2: cleared
- * r3: 1 (when returning to user-level)
- * r8-r11: restored (syscall return value(s))
- * r12: restored (user-level stack pointer)
- * r13: restored (user-level thread pointer)
- * r14: set to __kernel_syscall_via_epc
- * r15: restored (syscall #)
- * r16-r17: cleared
- * r18: user-level b6
- * r19: cleared
- * r20: user-level ar.fpsr
- * r21: user-level b0
- * r22: cleared
- * r23: user-level ar.bspstore
- * r24: user-level ar.rnat
- * r25: user-level ar.unat
- * r26: user-level ar.pfs
- * r27: user-level ar.rsc
- * r28: user-level ip
- * r29: user-level psr
- * r30: user-level cfm
- * r31: user-level pr
- * f6-f11: cleared
- * pr: restored (user-level pr)
- * b0: restored (user-level rp)
- * b6: restored
- * b7: set to __kernel_syscall_via_epc
- * ar.unat: restored (user-level ar.unat)
- * ar.pfs: restored (user-level ar.pfs)
- * ar.rsc: restored (user-level ar.rsc)
- * ar.rnat: restored (user-level ar.rnat)
- * ar.bspstore: restored (user-level ar.bspstore)
- * ar.fpsr: restored (user-level ar.fpsr)
- * ar.ccv: cleared
- * ar.csd: cleared
- * ar.ssd: cleared
- */
-GLOBAL_ENTRY(ia64_leave_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work. We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
- * is 0. After extra work processing has been completed, execution
- * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPTION
- RSM_PSR_I(p0, r2, r18) // disable interrupts
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else /* !CONFIG_PREEMPTION */
- RSM_PSR_I(pUStk, r2, r18)
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.global ia64_work_processed_syscall;
-ia64_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- adds r2=PT(LOADRS)+16,r12
- MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
- adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r18] // load current_thread_info()->flags
- ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
- adds r3=PT(AR_BSPSTORE)+16,r12 // deferred
- ;;
-#else
- adds r2=PT(LOADRS)+16,r12
- adds r3=PT(AR_BSPSTORE)+16,r12
- adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r18] // load current_thread_info()->flags
- ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
- nop.i 0
- ;;
-#endif
- mov r16=ar.bsp // M2 get existing backing store pointer
- ld8 r18=[r2],PT(R9)-PT(B6) // load b6
-(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
- ;;
- ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
-(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
-(p6) br.cond.spnt .work_pending_syscall
- ;;
- // start restoring the state saved on the kernel stack (struct pt_regs):
- ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
- ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
- ;;
- invala // M0|1 invalidate ALAT
- RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection
- cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
-
- ld8 r29=[r2],16 // M0|1 load cr.ipsr
- ld8 r28=[r3],16 // M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
- ;;
- ld8 r30=[r2],16 // M0|1 load cr.ifs
- ld8 r25=[r3],16 // M0|1 load ar.unat
-(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
-#else
- mov r22=r0 // A clear r22
- ;;
- ld8 r30=[r2],16 // M0|1 load cr.ifs
- ld8 r25=[r3],16 // M0|1 load ar.unat
-(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
-#endif
- ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
- MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled
- nop 0
- ;;
- ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
- ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
- mov f6=f0 // F clear f6
- ;;
- ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
- ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
- mov f7=f0 // F clear f7
- ;;
- ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
- ld8.fill r1=[r3],16 // M0|1 load r1
-(pUStk) mov r17=1 // A
- ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-(pUStk) st1 [r15]=r17 // M2|3
-#else
-(pUStk) st1 [r14]=r17 // M2|3
-#endif
- ld8.fill r13=[r3],16 // M0|1
- mov f8=f0 // F clear f8
- ;;
- ld8.fill r12=[r2] // M0|1 restore r12 (sp)
- ld8.fill r15=[r3] // M0|1 restore r15
- mov b6=r18 // I0 restore b6
-
- LOAD_PHYS_STACK_REG_SIZE(r17)
- mov f9=f0 // F clear f9
-(pKStk) br.cond.dpnt.many skip_rbs_switch // B
-
- srlz.d // M0 ensure interruption collection is off (for cover)
- shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
- COVER // B add current frame into dirty partition & set cr.ifs
- ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- mov r19=ar.bsp // M2 get new backing store pointer
- st8 [r14]=r22 // M save time at leave
- mov f10=f0 // F clear f10
-
- mov r22=r0 // A clear r22
- movl r14=__kernel_syscall_via_epc // X
- ;;
-#else
- mov r19=ar.bsp // M2 get new backing store pointer
- mov f10=f0 // F clear f10
-
- nop.m 0
- movl r14=__kernel_syscall_via_epc // X
- ;;
-#endif
- mov.m ar.csd=r0 // M2 clear ar.csd
- mov.m ar.ccv=r0 // M2 clear ar.ccv
- mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
-
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- mov f11=f0 // F clear f11
- br.cond.sptk.many rbs_switch // B
-END(ia64_leave_syscall)
-
-GLOBAL_ENTRY(ia64_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work. We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
- * is 0. After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPTION
- RSM_PSR_I(p0, r17, r31) // disable interrupts
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else
- RSM_PSR_I(pUStk, r17, r31)
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.work_processed_kernel:
- adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r17] // load current_thread_info()->flags
- adds r21=PT(PR)+16,r12
- ;;
-
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
- ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
- adds r30=PT(AR_CCV)+16,r12
-(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
-(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
-(p6) br.cond.spnt .work_pending
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
- RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
- ldf.fill f8=[r3],32
- ;;
- srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
- mov ar.ccv=r15
- ;;
- ldf.fill f11=[r2]
- BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...)
- ;;
-(pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- .pred.rel.mutex pUStk,pKStk
- MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
- MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
- nop.i 0
- ;;
-#else
- MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
- nop.i 0
- nop.i 0
- ;;
-#endif
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
- cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
- ld8 r21=[r17],16 // load b0
- ;;
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
-#else
-(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
-#endif
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
-#endif
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
-(pUStk) mov r17=1
- ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;;
- // mib : mov add br -> mib : ld8 add br
- // bbb_ : br nop cover;; mbb_ : mov br cover;;
- //
- // nothing requires bsp in r16 if the (pKStk) branch is selected.
-(pUStk) st8 [r3]=r22 // save time at leave
-(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
- shr.u r18=r19,16 // get byte size of existing "dirty" partition
- ;;
- ld8.fill r3=[r16] // deferred
- LOAD_PHYS_STACK_REG_SIZE(r17)
-(pKStk) br.cond.dpnt skip_rbs_switch
- mov r16=ar.bsp // get existing backing store pointer
-#else
- ld8.fill r3=[r16]
-(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
- shr.u r18=r19,16 // get byte size of existing "dirty" partition
- ;;
- mov r16=ar.bsp // get existing backing store pointer
- LOAD_PHYS_STACK_REG_SIZE(r17)
-(pKStk) br.cond.dpnt skip_rbs_switch
-#endif
-
- /*
- * Restore user backing store.
- *
- * NOTE: alloc, loadrs, and cover can't be predicated.
- */
-(pNonSys) br.cond.dpnt dont_preserve_current_frame
- COVER // add current frame into dirty partition and set cr.ifs
- ;;
- mov r19=ar.bsp // get new backing store pointer
-rbs_switch:
- sub r16=r16,r18 // krbs = old bsp - size of dirty partition
- cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
- ;;
- sub r19=r19,r16 // calculate total byte size of dirty partition
- add r18=64,r18 // don't force in0-in7 into memory...
- ;;
- shl r19=r19,16 // shift size of dirty partition into loadrs position
- ;;
-dont_preserve_current_frame:
- /*
- * To prevent leaking bits between the kernel and user-space,
- * we must clear the stacked registers in the "invalid" partition here.
- * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
- * 5 registers/cycle on McKinley).
- */
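-/*
- * In C-like pseudocode, the rse_clear_invalid recursion below amounts to
- * the following (illustrative sketch): each level claims and zeroes a
- * fresh frame of stacked registers, walking through the invalid
- * partition Nregs at a time.
- *
- *	void rse_clear_invalid(long nbytes, long depth)
- *	{
- *		// "alloc" claims Nregs stacked regs; zero the locals
- *		loc1 = loc2 = ... = 0;
- *		if (nbytes > Nregs * 8)
- *			rse_clear_invalid(nbytes - Nregs * 8, depth + 1);
- *		if (depth != 0)
- *			return;	// br.ret pops one recursion level
- *	}
- */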
-# define pRecurse p6
-# define pReturn p7
-#ifdef CONFIG_ITANIUM
-# define Nregs 10
-#else
-# define Nregs 14
-#endif
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
- sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r17
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-rse_clear_invalid:
-#ifdef CONFIG_ITANIUM
- // cycle 0
- { .mii
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
-}{ .mfb
- add out1=1,in1 // increment recursion count
- nop.f 0
- nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
- ;;
-}{ .mfi // cycle 1
- mov loc1=0
- nop.f 0
- mov loc2=0
-}{ .mib
- mov loc3=0
- mov loc4=0
-(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-
-}{ .mfi // cycle 2
- mov loc5=0
- nop.f 0
- cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
-}{ .mib
- mov loc6=0
- mov loc7=0
-(pReturn) br.ret.sptk.many b0
-}
-#else /* !CONFIG_ITANIUM */
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
- cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-#endif /* !CONFIG_ITANIUM */
-# undef pRecurse
-# undef pReturn
- ;;
- alloc r17=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
-skip_rbs_switch:
- mov ar.unat=r25 // M2
-(pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22
-(pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
- ;;
-(pUStk) mov ar.bspstore=r23 // M2
-(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
-(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
- ;;
- MOV_TO_IPSR(p0, r29, r25) // M2
- mov ar.pfs=r26 // I0
-(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
-
- MOV_TO_IFS(p9, r30, r25)// M2
- mov b0=r21 // I0
-(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
-
- mov ar.fpsr=r20 // M2
- MOV_TO_IIP(r28, r25) // M2
- nop 0
- ;;
-(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
- nop 0
-(pLvSys)mov r2=r0
-
- mov ar.rsc=r27 // M2
- mov pr=r31,-1 // I0
- RFI // B
-
- /*
- * On entry:
- * r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPTION)
- * r31 = current->thread_info->flags
- * On exit:
- * p6 = TRUE if work-pending-check needs to be redone
- *
- * Interrupts are disabled on entry, re-enabled depending on the pending
- * work, and disabled again on exit.
- */
-.work_pending_syscall:
- add r2=-8,r2
- add r3=-8,r3
- ;;
- st8 [r2]=r8
- st8 [r3]=r10
-.work_pending:
- tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
-(p6) br.cond.sptk.few .notify
- br.call.spnt.many rp=preempt_schedule_irq
-.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
-(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel
-
-.notify:
-(pUStk) br.call.spnt.many rp=notify_resume_user
-.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel
-
-.global ia64_work_pending_syscall_end;
-ia64_work_pending_syscall_end:
- adds r2=PT(R8)+16,r12
- adds r3=PT(R10)+16,r12
- ;;
- ld8 r8=[r2]
- ld8 r10=[r3]
- br.cond.sptk.many ia64_work_processed_syscall
-END(ia64_leave_kernel)
-
-ENTRY(handle_syscall_error)
- /*
- * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
- * lead us to mistake a negative return value for a failed syscall. Those syscalls
- * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
- * pt_regs.r8 is zero, we assume that the call completed successfully.
- */
- PT_REGS_UNWIND_INFO(0)
- ld8 r3=[r2] // load pt_regs.r8
- ;;
- cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
- ;;
-(p7) mov r10=-1
-(p7) sub r8=0,r8 // negate return value to get errno
- br.cond.sptk ia64_leave_syscall
-END(handle_syscall_error)
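-
-/*
- * Net effect of strace_error and handle_syscall_error, in C terms (sketch):
- *
- *	if (pt_regs.r8 != 0) {	// handler really flagged an error
- *		r10 = -1;	// tell user level the syscall failed
- *		r8 = -r8;	// r8 held -errno; return the positive errno
- *	}
- *
- * i.e. user level detects failure via r10 == -1 and reads errno from r8.
- */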
-
- /*
- * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
- * in case a system call gets restarted.
- */
-GLOBAL_ENTRY(ia64_invoke_schedule_tail)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,1,0
- mov loc0=rp
- mov out0=r8 // Address of previous task
- ;;
- br.call.sptk.many rp=schedule_tail
-.ret11: mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(ia64_invoke_schedule_tail)
-
- /*
- * Setup stack and call do_notify_resume_user(), keeping interrupts
- * disabled.
- *
- * Note that pSys and pNonSys need to be set up by the caller.
- * We declare 8 input registers so the system call args get preserved,
- * in case we need to restart a system call.
- */
-GLOBAL_ENTRY(notify_resume_user)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
- mov loc0=rp // save return address
- mov out0=0 // there is no "oldset"
- adds out1=8,sp // out1=&sigscratch->ar_pfs
-(pSys) mov out2=1 // out2==1 => we're in a syscall
- ;;
-(pNonSys) mov out2=0 // out2==0 => not a syscall
- .fframe 16
- .spillsp ar.unat, 16
- st8 [sp]=r9,-16 // allocate space for ar.unat and save it
- st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
- .body
- br.call.sptk.many rp=do_notify_resume_user
-.ret15: .restore sp
- adds sp=16,sp // pop scratch stack space
- ;;
- ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
- mov rp=loc0
- ;;
- mov ar.unat=r9
- mov ar.pfs=loc1
- br.ret.sptk.many rp
-END(notify_resume_user)
-
-ENTRY(sys_rt_sigreturn)
- PT_REGS_UNWIND_INFO(0)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- alloc r2=ar.pfs,8,0,1,0
- .prologue
- PT_REGS_SAVES(16)
- adds sp=-16,sp
- .body
- cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
- ;;
- /*
- * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
- * syscall-entry path does not save them we save them here instead. Note: we
- * don't need to save any other registers that are not saved by the stream-lined
- * syscall path, because restore_sigcontext() restores them.
- */
- adds r16=PT(F6)+32,sp
- adds r17=PT(F7)+32,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
- adds out0=16,sp // out0 = &sigscratch
- br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19: .restore sp,0
- adds sp=16,sp
- ;;
- ld8 r9=[sp] // load new ar.unat
- mov.sptk b7=r8,ia64_leave_kernel
- ;;
- mov ar.unat=r9
- br.many b7
-END(sys_rt_sigreturn)
-
-GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
- .prologue
- /*
- * r16 = fake ar.pfs; we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
- br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt
-.ret21: .body
- DO_LOAD_SWITCH_STACK
- br.cond.sptk.many rp // goes to ia64_leave_kernel
-END(ia64_prepare_handle_unaligned)
-
- //
- // unw_init_running(void (*callback)(info, arg), void *arg)
- //
-# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
-
-GLOBAL_ENTRY(unw_init_running)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- alloc loc1=ar.pfs,2,3,3,0
- ;;
- ld8 loc2=[in0],8
- mov loc0=rp
- mov r16=loc1
- DO_SAVE_SWITCH_STACK
- .body
-
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
- SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
- adds sp=-EXTRA_FRAME_SIZE,sp
- .body
- ;;
- adds out0=16,sp // &info
- mov out1=r13 // current
- adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
- br.call.sptk.many rp=unw_init_frame_info
-1: adds out0=16,sp // &info
- mov b6=loc2
- mov loc2=gp // save gp across indirect function call
- ;;
- ld8 gp=[in0]
- mov out1=in1 // arg
- br.call.sptk.many rp=b6 // invoke the callback function
-1: mov gp=loc2 // restore gp
-
- // For now, we don't allow changing registers from within
- // unw_init_running; if we ever want to allow that, we'd
- // have to do a load_switch_stack here:
- .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
-
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(unw_init_running)
-EXPORT_SYMBOL(unw_init_running)
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-GLOBAL_ENTRY(_mcount)
- br ftrace_stub
-END(_mcount)
-EXPORT_SYMBOL(_mcount)
-
-.here:
- br.ret.sptk.many b0
-
-GLOBAL_ENTRY(ftrace_caller)
- alloc out0 = ar.pfs, 8, 0, 4, 0
- mov out3 = r0
- ;;
- mov out2 = b0
- add r3 = 0x20, r3
- mov out1 = r1;
- br.call.sptk.many b0 = ftrace_patch_gp
- // this might be called from a module, so we must patch gp
-ftrace_patch_gp:
- movl gp=__gp
- mov b0 = r3
- ;;
-.global ftrace_call;
-ftrace_call:
-{
- .mlx
- nop.m 0x0
- movl r3 = .here;;
-}
- alloc loc0 = ar.pfs, 4, 4, 2, 0
- ;;
- mov loc1 = b0
- mov out0 = b0
- mov loc2 = r8
- mov loc3 = r15
- ;;
- adds out0 = -MCOUNT_INSN_SIZE, out0
- mov out1 = in2
- mov b6 = r3
-
- br.call.sptk.many b0 = b6
- ;;
- mov ar.pfs = loc0
- mov b0 = loc1
- mov r8 = loc2
- mov r15 = loc3
- br ftrace_stub
- ;;
-END(ftrace_caller)
-
-#else
-GLOBAL_ENTRY(_mcount)
- movl r2 = ftrace_stub
- movl r3 = ftrace_trace_function;;
- ld8 r3 = [r3];;
- ld8 r3 = [r3];;
- cmp.eq p7,p0 = r2, r3
-(p7) br.sptk.many ftrace_stub
- ;;
-
- alloc loc0 = ar.pfs, 4, 4, 2, 0
- ;;
- mov loc1 = b0
- mov out0 = b0
- mov loc2 = r8
- mov loc3 = r15
- ;;
- adds out0 = -MCOUNT_INSN_SIZE, out0
- mov out1 = in2
- mov b6 = r3
-
- br.call.sptk.many b0 = b6
- ;;
- mov ar.pfs = loc0
- mov b0 = loc1
- mov r8 = loc2
- mov r15 = loc3
- br ftrace_stub
- ;;
-END(_mcount)
-#endif
-
-GLOBAL_ENTRY(ftrace_stub)
- mov r3 = b0
- movl r2 = _mcount_ret_helper
- ;;
- mov b6 = r2
- mov b7 = r3
- br.ret.sptk.many b6
-
-_mcount_ret_helper:
- mov b0 = r42
- mov r1 = r41
- mov ar.pfs = r40
- br b7
-END(ftrace_stub)
-
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#define __SYSCALL(nr, entry) data8 entry
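-// asm/syscall_table.h is generated at build time from syscall.tbl; with
-// the __SYSCALL() definition above, each table row expands to a single
-// "data8 <entry>" pointer, so the include below emits the entire table.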
- .rodata
- .align 8
- .globl sys_call_table
-sys_call_table:
-#include <asm/syscall_table.h>
diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h
deleted file mode 100644
index 6463dc3162..0000000000
--- a/arch/ia64/kernel/entry.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * Preserved registers that are shared between code in ivt.S and
- * entry.S. Be careful not to step on these!
- */
-#define PRED_LEAVE_SYSCALL 1 /* TRUE iff leave from syscall */
-#define PRED_KERNEL_STACK 2 /* returning to kernel-stacks? */
-#define PRED_USER_STACK 3 /* returning to user-stacks? */
-#define PRED_SYSCALL 4 /* inside a system call? */
-#define PRED_NON_SYSCALL 5 /* complement of PRED_SYSCALL */
-
-#ifdef __ASSEMBLY__
-# define PASTE2(x,y) x##y
-# define PASTE(x,y) PASTE2(x,y)
-
-# define pLvSys PASTE(p,PRED_LEAVE_SYSCALL)
-# define pKStk PASTE(p,PRED_KERNEL_STACK)
-# define pUStk PASTE(p,PRED_USER_STACK)
-# define pSys PASTE(p,PRED_SYSCALL)
-# define pNonSys PASTE(p,PRED_NON_SYSCALL)
-#endif
-
-#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
-#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
-#define SOS(f) (IA64_SAL_OS_STATE_##f##_OFFSET)
-
-#define PT_REGS_SAVES(off) \
- .unwabi 3, 'i'; \
- .fframe IA64_PT_REGS_SIZE+16+(off); \
- .spillsp rp, PT(CR_IIP)+16+(off); \
- .spillsp ar.pfs, PT(CR_IFS)+16+(off); \
- .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
- .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
- .spillsp pr, PT(PR)+16+(off);
-
-#define PT_REGS_UNWIND_INFO(off) \
- .prologue; \
- PT_REGS_SAVES(off); \
- .body
-
-#define SWITCH_STACK_SAVES(off) \
- .savesp ar.unat,SW(CALLER_UNAT)+16+(off); \
- .savesp ar.fpsr,SW(AR_FPSR)+16+(off); \
- .spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off); \
- .spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off); \
- .spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off); \
- .spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off); \
- .spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off); \
- .spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off); \
- .spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off); \
- .spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off); \
- .spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off); \
- .spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off); \
- .spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off); \
- .spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off); \
- .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off); \
- .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off); \
- .spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off); \
- .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
- .spillsp @priunat,SW(AR_UNAT)+16+(off); \
- .spillsp ar.rnat,SW(AR_RNAT)+16+(off); \
- .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off); \
- .spillsp pr,SW(PR)+16+(off)
-
-#define DO_SAVE_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- .fframe IA64_SWITCH_STACK_SIZE; \
- adds sp=-IA64_SWITCH_STACK_SIZE,sp; \
- mov.ret.sptk b7=r28,1f; \
- SWITCH_STACK_SAVES(0); \
- br.cond.sptk.many save_switch_stack; \
-1:
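-
-/*
- * Note the calling convention of both macros: the helper is reached with
- * a plain branch rather than br.call, and it returns through b7, which is
- * preloaded with the local label 1f ("mov.ret.sptk b7=r28,1f" also tags
- * the branch as a return for the predictor). This matches the ".altrp b7"
- * unwind directive in save_switch_stack/load_switch_stack.
- */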
-
-#define DO_LOAD_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- invala; \
- mov.ret.sptk b7=r28,1f; \
- br.cond.sptk.many load_switch_stack; \
-1: .restore sp; \
- adds sp=IA64_SWITCH_STACK_SIZE,sp
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
deleted file mode 100644
index dd5bfed520..0000000000
--- a/arch/ia64/kernel/err_inject.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * err_inject.c -
- * 1.) Inject errors into a processor.
- * 2.) Query error injection capabilities.
- * This driver, together with user-space code, acts as an error
- * injection tool.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Written by: Fenghua Yu <fenghua.yu@intel.com>, Intel Corporation
- * Copyright (C) 2006, Intel Corp. All rights reserved.
- *
- */
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-
-#define ERR_INJ_DEBUG
-
-#define ERR_DATA_BUFFER_SIZE 3 // three 8-byte values
-
-#define define_one_ro(name) \
-static DEVICE_ATTR(name, 0444, show_##name, NULL)
-
-#define define_one_rw(name) \
-static DEVICE_ATTR(name, 0644, show_##name, store_##name)
-
-static u64 call_start[NR_CPUS];
-static u64 phys_addr[NR_CPUS];
-static u64 err_type_info[NR_CPUS];
-static u64 err_struct_info[NR_CPUS];
-static struct {
- u64 data1;
- u64 data2;
- u64 data3;
-} __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
-static s64 status[NR_CPUS];
-static u64 capabilities[NR_CPUS];
-static u64 resources[NR_CPUS];
-
-#define show(name) \
-static ssize_t \
-show_##name(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- u32 cpu=dev->id; \
- return sprintf(buf, "%llx\n", name[cpu]); \
-}
-
-#define store(name) \
-static ssize_t \
-store_##name(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- unsigned int cpu=dev->id; \
- name[cpu] = simple_strtoull(buf, NULL, 16); \
- return size; \
-}
-
-show(call_start)
-
-/* It's the user's responsibility to call the PAL procedure on a specific
- * processor. The cpu number in the driver is only used for storing data.
- */
-static ssize_t
-store_call_start(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned int cpu=dev->id;
- unsigned long call_start = simple_strtoull(buf, NULL, 16);
-
-#ifdef ERR_INJ_DEBUG
- printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
- printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
- printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
- printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
- err_data_buffer[cpu].data1,
- err_data_buffer[cpu].data2,
- err_data_buffer[cpu].data3);
-#endif
- switch (call_start) {
- case 0: /* Do nothing. */
- break;
- case 1: /* Call pal_mc_error_inject in physical mode. */
- status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu],
- err_struct_info[cpu],
- ia64_tpa(&err_data_buffer[cpu]),
- &capabilities[cpu],
- &resources[cpu]);
- break;
- case 2: /* Call pal_mc_error_inject in virtual mode. */
- status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu],
- err_struct_info[cpu],
- ia64_tpa(&err_data_buffer[cpu]),
- &capabilities[cpu],
- &resources[cpu]);
- break;
- default:
- status[cpu] = -EINVAL;
- break;
- }
-
-#ifdef ERR_INJ_DEBUG
- printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
- printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
- printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
-#endif
- return size;
-}
-
-show(err_type_info)
-store(err_type_info)
-
-static ssize_t
-show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- unsigned int cpu=dev->id;
- return sprintf(buf, "%llx\n", phys_addr[cpu]);
-}
-
-static ssize_t
-store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned int cpu=dev->id;
- u64 virt_addr=simple_strtoull(buf, NULL, 16);
- int ret;
-
- ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
- if (ret<=0) {
-#ifdef ERR_INJ_DEBUG
- printk("Virtual address %llx is not existing.\n", virt_addr);
-#endif
- return -EINVAL;
- }
-
- phys_addr[cpu] = ia64_tpa(virt_addr);
- return size;
-}
-
-show(err_struct_info)
-store(err_struct_info)
-
-static ssize_t
-show_err_data_buffer(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int cpu=dev->id;
-
- return sprintf(buf, "%llx, %llx, %llx\n",
- err_data_buffer[cpu].data1,
- err_data_buffer[cpu].data2,
- err_data_buffer[cpu].data3);
-}
-
-static ssize_t
-store_err_data_buffer(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned int cpu=dev->id;
- int ret;
-
-#ifdef ERR_INJ_DEBUG
- printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
- err_data_buffer[cpu].data1,
- err_data_buffer[cpu].data2,
- err_data_buffer[cpu].data3,
- cpu);
-#endif
- ret = sscanf(buf, "%llx, %llx, %llx",
- &err_data_buffer[cpu].data1,
- &err_data_buffer[cpu].data2,
- &err_data_buffer[cpu].data3);
- if (ret!=ERR_DATA_BUFFER_SIZE)
- return -EINVAL;
-
- return size;
-}
-
-show(status)
-show(capabilities)
-show(resources)
-
-define_one_rw(call_start);
-define_one_rw(err_type_info);
-define_one_rw(err_struct_info);
-define_one_rw(err_data_buffer);
-define_one_rw(virtual_to_phys);
-define_one_ro(status);
-define_one_ro(capabilities);
-define_one_ro(resources);
-
-static struct attribute *default_attrs[] = {
- &dev_attr_call_start.attr,
- &dev_attr_virtual_to_phys.attr,
- &dev_attr_err_type_info.attr,
- &dev_attr_err_struct_info.attr,
- &dev_attr_err_data_buffer.attr,
- &dev_attr_status.attr,
- &dev_attr_capabilities.attr,
- &dev_attr_resources.attr,
- NULL
-};
-
-static struct attribute_group err_inject_attr_group = {
- .attrs = default_attrs,
- .name = "err_inject"
-};
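-
-/*
- * With the group above registered on a CPU device, these attributes show
- * up under /sys/devices/system/cpu/cpuN/err_inject/. A typical
- * (hypothetical) session writes err_type_info, err_struct_info and
- * err_data_buffer first, then triggers the PAL call by writing 1
- * (physical mode) or 2 (virtual mode) to call_start, and finally reads
- * status, capabilities and resources back.
- */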
-/* Add/Remove err_inject interface for CPU device */
-static int err_inject_add_dev(unsigned int cpu)
-{
- struct device *sys_dev = get_cpu_device(cpu);
-
- return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
-}
-
-static int err_inject_remove_dev(unsigned int cpu)
-{
- struct device *sys_dev = get_cpu_device(cpu);
-
- sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
- return 0;
-}
-
-static enum cpuhp_state hp_online;
-
-static int __init err_inject_init(void)
-{
- int ret;
-#ifdef ERR_INJ_DEBUG
- printk(KERN_INFO "Enter error injection driver.\n");
-#endif
-
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/err_inj:online",
- err_inject_add_dev, err_inject_remove_dev);
- if (ret >= 0) {
- hp_online = ret;
- ret = 0;
- }
- return ret;
-}
-
-static void __exit err_inject_exit(void)
-{
-#ifdef ERR_INJ_DEBUG
- printk(KERN_INFO "Exit error injection driver.\n");
-#endif
- cpuhp_remove_state(hp_online);
-}
-
-module_init(err_inject_init);
-module_exit(err_inject_exit);
-
-MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
-MODULE_DESCRIPTION("MC error injection kernel sysfs interface");
-MODULE_LICENSE("GPL");
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
deleted file mode 100644
index 4df57c93e0..0000000000
--- a/arch/ia64/kernel/esi.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Extensible SAL Interface (ESI) support routines.
- *
- * Copyright (C) 2006 Hewlett-Packard Co
- * Alex Williamson <alex.williamson@hp.com>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/esi.h>
-#include <asm/sal.h>
-
-MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
-MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
-MODULE_LICENSE("GPL");
-
-#define MODULE_NAME "esi"
-
-enum esi_systab_entry_type {
- ESI_DESC_ENTRY_POINT = 0
-};
-
-/*
- * Entry type: Size:
- * 0 48
- */
-#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)]
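-/*
- * The string-indexing trick above encodes a tiny size table: "\060" is a
- * one-character string whose element 0 has value 060 (octal) == 48, so
- * ESI_DESC_SIZE(ESI_DESC_ENTRY_POINT) evaluates to 48 bytes. Further
- * entry types would append further size characters to the string.
- */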
-
-typedef struct ia64_esi_desc_entry_point {
- u8 type;
- u8 reserved1[15];
- u64 esi_proc;
- u64 gp;
- efi_guid_t guid;
-} ia64_esi_desc_entry_point_t;
-
-struct pdesc {
- void *addr;
- void *gp;
-};
-
-static struct ia64_sal_systab *esi_systab;
-
-extern unsigned long esi_phys;
-
-static int __init esi_init (void)
-{
- struct ia64_sal_systab *systab;
- char *p;
- int i;
-
- if (esi_phys == EFI_INVALID_TABLE_ADDR)
- return -ENODEV;
-
- systab = __va(esi_phys);
-
- if (strncmp(systab->signature, "ESIT", 4) != 0) {
- printk(KERN_ERR "bad signature in ESI system table!");
- return -ENODEV;
- }
-
- p = (char *) (systab + 1);
- for (i = 0; i < systab->entry_count; i++) {
- /*
- * The first byte of each entry type contains the type
- * descriptor.
- */
- switch (*p) {
- case ESI_DESC_ENTRY_POINT:
- break;
- default:
- printk(KERN_WARNING "Unknown table type %d found in "
- "ESI table, ignoring rest of table\n", *p);
- return -ENODEV;
- }
-
- p += ESI_DESC_SIZE(*p);
- }
-
- esi_systab = systab;
- return 0;
-}
-
-
-int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
- enum esi_proc_type proc_type, u64 func,
- u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
- u64 arg7)
-{
- struct ia64_fpreg fr[6];
- unsigned long flags = 0;
- int i;
- char *p;
-
- if (!esi_systab)
- return -1;
-
- p = (char *) (esi_systab + 1);
- for (i = 0; i < esi_systab->entry_count; i++) {
- if (*p == ESI_DESC_ENTRY_POINT) {
- ia64_esi_desc_entry_point_t *esi = (void *)p;
- if (!efi_guidcmp(guid, esi->guid)) {
- ia64_sal_handler esi_proc;
- struct pdesc pdesc;
-
- pdesc.addr = __va(esi->esi_proc);
- pdesc.gp = __va(esi->gp);
-
- esi_proc = (ia64_sal_handler) &pdesc;
-
- ia64_save_scratch_fpregs(fr);
- if (proc_type == ESI_PROC_SERIALIZED)
- spin_lock_irqsave(&sal_lock, flags);
- else if (proc_type == ESI_PROC_MP_SAFE)
- local_irq_save(flags);
- else
- preempt_disable();
- *isrvp = (*esi_proc)(func, arg1, arg2, arg3,
- arg4, arg5, arg6, arg7);
- if (proc_type == ESI_PROC_SERIALIZED)
- spin_unlock_irqrestore(&sal_lock,
- flags);
- else if (proc_type == ESI_PROC_MP_SAFE)
- local_irq_restore(flags);
- else
- preempt_enable();
- ia64_load_scratch_fpregs(fr);
- return 0;
- }
- }
- p += ESI_DESC_SIZE(*p);
- }
- return -1;
-}
-EXPORT_SYMBOL_GPL(ia64_esi_call);
-
-int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
- u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
- u64 arg5, u64 arg6, u64 arg7)
-{
- struct ia64_fpreg fr[6];
- unsigned long flags;
- u64 esi_params[8];
- char *p;
- int i;
-
- if (!esi_systab)
- return -1;
-
- p = (char *) (esi_systab + 1);
- for (i = 0; i < esi_systab->entry_count; i++) {
- if (*p == ESI_DESC_ENTRY_POINT) {
- ia64_esi_desc_entry_point_t *esi = (void *)p;
- if (!efi_guidcmp(guid, esi->guid)) {
- ia64_sal_handler esi_proc;
- struct pdesc pdesc;
-
- pdesc.addr = (void *)esi->esi_proc;
- pdesc.gp = (void *)esi->gp;
-
- esi_proc = (ia64_sal_handler) &pdesc;
-
- esi_params[0] = func;
- esi_params[1] = arg1;
- esi_params[2] = arg2;
- esi_params[3] = arg3;
- esi_params[4] = arg4;
- esi_params[5] = arg5;
- esi_params[6] = arg6;
- esi_params[7] = arg7;
- ia64_save_scratch_fpregs(fr);
- spin_lock_irqsave(&sal_lock, flags);
- *isrvp = esi_call_phys(esi_proc, esi_params);
- spin_unlock_irqrestore(&sal_lock, flags);
- ia64_load_scratch_fpregs(fr);
- return 0;
- }
- }
- p += ESI_DESC_SIZE(*p);
- }
- return -1;
-}
-EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
-
-static void __exit esi_exit (void)
-{
-}
-
-module_init(esi_init);
-module_exit(esi_exit); /* makes module removable... */
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
deleted file mode 100644
index 9928c5b295..0000000000
--- a/arch/ia64/kernel/esi_stub.S
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ESI call stub.
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Alex Williamson <alex.williamson@hp.com>
- *
- * Based on EFI call stub by David Mosberger. The stub is virtually
- * identical to the one for EFI phys-mode calls, except that ESI
- * calls may have up to 8 arguments, so they get passed to this routine
- * through memory.
- *
- * This stub allows us to make ESI calls in physical mode with interrupts
- * turned off. ESI calls may not support calling from virtual mode.
- *
- * Google for "Extensible SAL specification" for a document describing the
- * ESI standard.
- */
-
-/*
- * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
- * Abstraction Layer Specification", revision 2.6e). Note that
- * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
- * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
- * (the br.ia instruction fails unless psr.dfl and psr.dfh are
- * cleared). Fortunately, SAL promises not to touch the floating
- * point regs, so at least we don't have to save f2-f127.
- */
-#define PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PSR_BITS_TO_SET \
- (IA64_PSR_BN)
-
-#include <linux/export.h>
-#include <asm/processor.h>
-#include <asm/asmmacro.h>
-
-/*
- * Inputs:
- * in0 = address of function descriptor of ESI routine to call
- * in1 = address of array of ESI parameters
- *
- * Outputs:
- * r8 = result returned by called function
- */
-GLOBAL_ENTRY(esi_call_phys)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- alloc loc1=ar.pfs,2,7,8,0
- ld8 r2=[in0],8 // load ESI function's entry point
- mov loc0=rp
- .body
- ;;
- ld8 out0=[in1],8 // ESI params loaded from array
- ;; // passing all as inputs doesn't work
- ld8 out1=[in1],8
- ;;
- ld8 out2=[in1],8
- ;;
- ld8 out3=[in1],8
- ;;
- ld8 out4=[in1],8
- ;;
- ld8 out5=[in1],8
- ;;
- ld8 out6=[in1],8
- ;;
- ld8 out7=[in1]
- mov loc2=gp // save global pointer
- mov loc4=ar.rsc // save RSE configuration
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- ;;
- ld8 gp=[in0] // load ESI function's global pointer
- movl r16=PSR_BITS_TO_CLEAR
- mov loc3=psr // save processor status word
- movl r17=PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17
- mov b6=r2
- ;;
- andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode_phys
-.ret0: mov loc5=r19 // old ar.bsp
- mov loc6=r20 // old sp
- br.call.sptk.many rp=b6 // call the ESI function
-.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3 // save virtual mode psr
- mov r19=loc5 // save virtual mode bspstore
- mov r20=loc6 // save virtual mode sp
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret2: mov ar.rsc=loc4 // restore RSE configuration
- mov ar.pfs=loc1
- mov rp=loc0
- mov gp=loc2
- br.ret.sptk.many rp
-END(esi_call_phys)
-EXPORT_SYMBOL_GPL(esi_call_phys)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
deleted file mode 100644
index cc4733e999..0000000000
--- a/arch/ia64/kernel/fsys.S
+++ /dev/null
@@ -1,837 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the light-weight system call handlers (fsyscall-handlers).
- *
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 25-Sep-03 davidm Implement fsys_rt_sigprocmask().
- * 18-Feb-03 louisk Implement fsys_gettimeofday().
- * 28-Feb-03 davidm Fixed several bugs in fsys_gettimeofday(). Tuned it some more,
- * probably broke it along the way... ;-)
- * 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make
- * it capable of using memory based clocks without falling back to C code.
- * 08-Feb-07 Fenghua Yu Implement fsys_getcpu.
- *
- */
-
-#include <asm/asmmacro.h>
-#include <asm/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/percpu.h>
-#include <asm/thread_info.h>
-#include <asm/sal.h>
-#include <asm/signal.h>
-#include <asm/unistd.h>
-
-#include "entry.h"
-#include <asm/native/inst.h>
-
-/*
- * See Documentation/arch/ia64/fsys.rst for details on fsyscalls.
- *
- * On entry to an fsyscall handler:
- * r10 = 0 (i.e., defaults to "successful syscall return")
- * r11 = saved ar.pfs (a user-level value)
- * r15 = system call number
- * r16 = "current" task pointer (in normal kernel-mode, this is in r13)
- * r32-r39 = system call arguments
- * b6 = return address (a user-level value)
- * ar.pfs = previous frame-state (a user-level value)
- * PSR.be = cleared to zero (i.e., little-endian byte order is in effect)
- * all other registers may contain values passed in from user-mode
- *
- * On return from an fsyscall handler:
- * r11 = saved ar.pfs (as passed into the fsyscall handler)
- * r15 = system call number (as passed into the fsyscall handler)
- * r32-r39 = system call arguments (as passed into the fsyscall handler)
- * b6 = return address (as passed into the fsyscall handler)
- * ar.pfs = previous frame-state (as passed into the fsyscall handler)
- */
-
-ENTRY(fsys_ni_syscall)
- .prologue
- .altrp b6
- .body
- mov r8=ENOSYS
- mov r10=-1
- FSYS_RETURN
-END(fsys_ni_syscall)
-
-ENTRY(fsys_getpid)
- .prologue
- .altrp b6
- .body
- add r17=IA64_TASK_SIGNAL_OFFSET,r16
- ;;
- ld8 r17=[r17] // r17 = current->signal
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- ;;
- ld4 r9=[r9]
- add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17
- ;;
- and r9=TIF_ALLWORK_MASK,r9
- ld8 r17=[r17] // r17 = current->signal->pids[PIDTYPE_TGID]
- ;;
- add r8=IA64_PID_LEVEL_OFFSET,r17
- ;;
- ld4 r8=[r8] // r8 = pid->level
- add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0]
- ;;
- shl r8=r8,IA64_UPID_SHIFT
- ;;
- add r17=r17,r8 // r17 = &pid->numbers[pid->level]
- ;;
- ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr
- ;;
- mov r17=0
- ;;
- cmp.ne p8,p0=0,r9
-(p8) br.spnt.many fsys_fallback_syscall
- FSYS_RETURN
-END(fsys_getpid)
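-
-/*
- * Illustrative C equivalent of the fast path above (sketch):
- *
- *	struct pid *pid = current->signal->pids[PIDTYPE_TGID];
- *	return pid->numbers[pid->level].nr;
- *
- * with a branch to the full (slow) syscall path whenever any bit of
- * TIF_ALLWORK_MASK is set in current_thread_info()->flags.
- */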
-
-ENTRY(fsys_set_tid_address)
- .prologue
- .altrp b6
- .body
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- add r17=IA64_TASK_THREAD_PID_OFFSET,r16
- ;;
- ld4 r9=[r9]
- tnat.z p6,p7=r32 // check argument register for being NaT
- ld8 r17=[r17] // r17 = current->thread_pid
- ;;
- and r9=TIF_ALLWORK_MASK,r9
- add r8=IA64_PID_LEVEL_OFFSET,r17
- add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
- ;;
- ld4 r8=[r8] // r8 = pid->level
- add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0]
- ;;
- shl r8=r8,IA64_UPID_SHIFT
- ;;
- add r17=r17,r8 // r17 = &pid->numbers[pid->level]
- ;;
- ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr
- ;;
- cmp.ne p8,p0=0,r9
- mov r17=-1
- ;;
-(p6) st8 [r18]=r32
-(p7) st8 [r18]=r17
-(p8) br.spnt.many fsys_fallback_syscall
- ;;
- mov r17=0 // must not leak kernel bits...
- mov r18=0 // must not leak kernel bits...
- FSYS_RETURN
-END(fsys_set_tid_address)
-
-#if IA64_GTOD_SEQ_OFFSET !=0
-#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
-#endif
-#if IA64_ITC_JITTER_OFFSET !=0
-#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t
-#endif
-#define CLOCK_REALTIME 0
-#define CLOCK_MONOTONIC 1
-#define CLOCK_DIVIDE_BY_1000 0x4000
-#define CLOCK_ADD_MONOTONIC 0x8000
-
-ENTRY(fsys_gettimeofday)
- .prologue
- .altrp b6
- .body
- mov r31 = r32
- tnat.nz p6,p0 = r33 // guard against NaT argument
-(p6) br.cond.spnt.few .fail_einval
- mov r30 = CLOCK_DIVIDE_BY_1000
- ;;
-.gettime:
- // Register map
- // Incoming r31 = pointer to address where to place result
- // r30 = flags determining how time is processed
- // r2,r3 = temp r4-r7 preserved
- // r8 = result nanoseconds
- // r9 = result seconds
- // r10 = temporary storage for clock difference
- // r11 = preserved: saved ar.pfs
- // r12 = preserved: memory stack
- // r13 = preserved: thread pointer
- // r14 = address of mask / mask value
- // r15 = preserved: system call number
- // r16 = preserved: current task pointer
- // r17 = (not used)
- // r18 = (not used)
- // r19 = address of itc_lastcycle
- // r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence)
- // r21 = address of mmio_ptr
- // r22 = address of wall_time or monotonic_time
- // r23 = address of shift / value
- // r24 = address mult factor / cycle_last value
- // r25 = itc_lastcycle value
- // r26 = address clocksource cycle_last
- // r27 = (not used)
- // r28 = sequence number at the beginning of critical section
- // r29 = address of itc_jitter
- // r30 = time processing flags / memory address
- // r31 = pointer to result
- // Predicates
- // p6,p7 short term use
- // p8 = timesource ar.itc
- // p9 = timesource mmio64
- // p10 = timesource mmio32 - not used
- // p11 = timesource not to be handled by asm code
- // p12 = memory time source ( = p9 | p10) - not used
- // p13 = do cmpxchg with itc_lastcycle
- // p14 = Divide by 1000
- // p15 = Add monotonic
- //
- // Note that instructions are optimized for McKinley. McKinley can
- // process two bundles simultaneously and therefore we continuously
- // try to feed the CPU two bundles and then a stop.
-
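- // The retry protocol implemented between .time_redo and the final
- // cmp4.ne is, in C terms (illustrative sketch):
- //
- //	do {
- //		seq = gtod_lock.sequence & ~1;	// odd = update in flight,
- //						// forces the compare to fail
- //		read the clocksource and compute sec/nsec;
- //	} while (seq != gtod_lock.sequence);
-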
- add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
- tnat.nz p6,p0 = r31 // guard against NaT argument
-(p6) br.cond.spnt.few .fail_einval
- movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
- ;;
- ld4 r2 = [r2] // process work pending flags
- movl r29 = itc_jitter_data // itc_jitter
- add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time
- add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
- mov pr = r30,0xc000 // Set predicates according to function
- ;;
- and r2 = TIF_ALLWORK_MASK,r2
- add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
-(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
- ;;
- add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
- cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
-(p6) br.cond.spnt.many fsys_fallback_syscall
- ;;
- // Begin critical section
-.time_redo:
- ld4.acq r28 = [r20] // gtod_lock.sequence, Must take first
- ;;
- and r28 = ~1,r28 // And make sequence even to force retry if odd
- ;;
- ld8 r30 = [r21] // clocksource->mmio_ptr
- add r24 = IA64_CLKSRC_MULT_OFFSET,r20
- ld4 r2 = [r29] // itc_jitter value
- add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
- add r14 = IA64_CLKSRC_MASK_OFFSET,r20
- ;;
- ld4 r3 = [r24] // clocksource mult value
- ld8 r14 = [r14] // clocksource mask value
- cmp.eq p8,p9 = 0,r30 // use cpu timer if no mmio_ptr
- ;;
- setf.sig f7 = r3 // Setup for mult scaling of counter
-(p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
- ld4 r23 = [r23] // clocksource shift value
- ld8 r24 = [r26] // get clksrc_cycle_last value
-(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
- ;;
- .pred.rel.mutex p8,p9
- MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
-(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
-(p13) ld8 r25 = [r19] // get itc_lastcycle value
- ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET // sec
- ;;
- ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET // snsec
-(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
- ;;
-(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
- sub r10 = r2,r24 // current_cycle - last_cycle
- ;;
-(p6) sub r10 = r25,r24 // time we got was less than last_cycle
-(p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg
- ;;
-(p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv
- ;;
-(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
- ;;
-(p7) sub r10 = r3,r24 // then use new last_cycle instead
- ;;
- and r10 = r10,r14 // Apply mask
- ;;
- setf.sig f8 = r10
- nop.i 123
- ;;
- // fault check takes 5 cycles and we have spare time
-EX(.fail_efault, probe.w.fault r31, 3)
- xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
- ;;
- getf.sig r2 = f8
- mf
- ;;
- ld4 r10 = [r20] // gtod_lock.sequence
- add r8 = r8,r2 // Add xtime.nsecs
- ;;
- shr.u r8 = r8,r23 // shift by factor
- cmp4.ne p7,p0 = r28,r10
-(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
- // End critical section.
- // Now r8=tv->tv_nsec and r9=tv->tv_sec
- mov r10 = r0
- movl r2 = 1000000000
- add r23 = IA64_TIMESPEC_TV_NSEC_OFFSET, r31
-(p14) movl r3 = 2361183241434822607 // Prep for / 1000 hack
- ;;
-.time_normalize:
- mov r21 = r8
- cmp.ge p6,p0 = r8,r2
-(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time
- ;;
-(p14) setf.sig f8 = r20
-(p6) sub r8 = r8,r2
-(p6) add r9 = 1,r9 // two nops before the branch.
-(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod
-(p6) br.cond.dpnt.few .time_normalize
- ;;
-	// Divided by 8 through shift. Now divide by 125.
-	// The compiler was able to do that with a multiply
-	// and a shift, and we do the same.
-EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
-(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it
- ;;
-(p14) getf.sig r2 = f8
- ;;
- mov r8 = r0
-(p14) shr.u r21 = r2, 4
- ;;
-EX(.fail_efault, st8 [r31] = r9)
-EX(.fail_efault, st8 [r23] = r21)
- FSYS_RETURN
-.fail_einval:
- mov r8 = EINVAL
- mov r10 = -1
- FSYS_RETURN
-.fail_efault:
- mov r8 = EFAULT
- mov r10 = -1
- FSYS_RETURN
-END(fsys_gettimeofday)
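
The loop above is a seqcount read side around classic clocksource
arithmetic: time = base + ((cycles - cycle_last) & mask) * mult, shifted
down by clk_shift, then normalized. A minimal C sketch, assuming a
simplified gtod struct, a stand-in read_cycles(), and GCC/Clang
__int128; the itc_jitter/cmpxchg path and the user-fault probes are
dropped:

	#include <stdint.h>

	struct gtod_sketch {			/* cf. fsyscall_gtod_data_t */
		volatile uint32_t seq;		/* gtod_lock.sequence */
		uint64_t sec, snsec;		/* seconds, shifted nanoseconds */
		uint64_t cycle_last, mask;
		uint32_t mult, shift;
	};

	static uint64_t read_cycles(void)	/* stand-in for ar.itc / MMIO */
	{
		return 0;
	}

	static void gettime_sketch(const struct gtod_sketch *g,
				   uint64_t *sec, uint64_t *nsec)
	{
		uint32_t seq;
		uint64_t s, ns;

		do {
			seq = g->seq & ~1u;	/* odd sequence forces a retry */
			s = g->sec;
			ns = g->snsec +
			     ((read_cycles() - g->cycle_last) & g->mask) * g->mult;
			ns >>= g->shift;
		} while (seq != g->seq);	/* writer intervened: redo */

		while (ns >= 1000000000) {	/* .time_normalize */
			ns -= 1000000000;
			s++;
		}
		*sec = s;
		*nsec = ns;
	}

	/*
	 * The "/ 1000 hack": dividing by 8 through a shift leaves a divide
	 * by 125, done as a multiply by ceil(2^68 / 125) = 2361183241434822607
	 * and a 68-bit shift (xmpy.hu gives the high 64 bits, shr.u the rest).
	 */
	static uint64_t ns_to_us(uint64_t ns)
	{
		unsigned __int128 p =
			(unsigned __int128)(ns >> 3) * 2361183241434822607ULL;
		return (uint64_t)(p >> 64) >> 4;
	}
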
-
-ENTRY(fsys_clock_gettime)
- .prologue
- .altrp b6
- .body
- cmp4.ltu p6, p0 = CLOCK_MONOTONIC, r32
- // Fallback if this is not CLOCK_REALTIME or CLOCK_MONOTONIC
-(p6) br.spnt.few fsys_fallback_syscall
- mov r31 = r33
- shl r30 = r32,15
- br.many .gettime
-END(fsys_clock_gettime)
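
Note how little fsys_clock_gettime needs: shl r30=r32,15 maps the clock
id straight onto the flag bits defined earlier (CLOCK_REALTIME << 15 == 0,
CLOCK_MONOTONIC << 15 == 0x8000 == CLOCK_ADD_MONOTONIC), and inside
.gettime, "mov pr = r30,0xc000" copies bits 14 and 15 of r30 into p14
(divide by 1000) and p15 (add monotonic). As a one-line C sketch:

	static unsigned long gettime_flags(unsigned long clock_id)
	{
		/* 0 -> 0, 1 -> CLOCK_ADD_MONOTONIC (0x8000) */
		return clock_id << 15;
	}
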
-
-/*
- * fsys_getcpu doesn't use the third parameter in this implementation. It reads
- * current_thread_info()->cpu and the corresponding node from cpu_to_node_map.
- */
-ENTRY(fsys_getcpu)
- .prologue
- .altrp b6
- .body
- ;;
- add r2=TI_FLAGS+IA64_TASK_SIZE,r16
- tnat.nz p6,p0 = r32 // guard against NaT argument
- add r3=TI_CPU+IA64_TASK_SIZE,r16
- ;;
- ld4 r3=[r3] // M r3 = thread_info->cpu
- ld4 r2=[r2] // M r2 = thread_info->flags
-(p6) br.cond.spnt.few .fail_einval // B
- ;;
- tnat.nz p7,p0 = r33 // I guard against NaT argument
-(p7) br.cond.spnt.few .fail_einval // B
- ;;
- cmp.ne p6,p0=r32,r0
- cmp.ne p7,p0=r33,r0
- ;;
-#ifdef CONFIG_NUMA
- movl r17=cpu_to_node_map
- ;;
-EX(.fail_efault, (p6) probe.w.fault r32, 3) // M This takes 5 cycles
-EX(.fail_efault, (p7) probe.w.fault r33, 3) // M This takes 5 cycles
- shladd r18=r3,1,r17
- ;;
- ld2 r20=[r18] // r20 = cpu_to_node_map[cpu]
- and r2 = TIF_ALLWORK_MASK,r2
- ;;
- cmp.ne p8,p0=0,r2
-(p8) br.spnt.many fsys_fallback_syscall
-	;;
-EX(.fail_efault, (p6) st4 [r32] = r3)
-EX(.fail_efault, (p7) st2 [r33] = r20)
- mov r8=0
- ;;
-#else
-EX(.fail_efault, (p6) probe.w.fault r32, 3) // M This takes 5 cycles
-EX(.fail_efault, (p7) probe.w.fault r33, 3) // M This takes 5 cycles
- and r2 = TIF_ALLWORK_MASK,r2
- ;;
- cmp.ne p8,p0=0,r2
-(p8) br.spnt.many fsys_fallback_syscall
- ;;
-EX(.fail_efault, (p6) st4 [r32] = r3)
-EX(.fail_efault, (p7) st2 [r33] = r0)
- mov r8=0
- ;;
-#endif
- FSYS_RETURN
-END(fsys_getcpu)
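
A C reading of fsys_getcpu: the cmp.ne tests against r0 make each store
conditional, so NULL output pointers are simply skipped, and on NUMA
kernels shladd r18=r3,1,r17 indexes the 16-bit-per-entry cpu_to_node_map.
A sketch under those assumptions (the pending-work fallback is omitted):

	#include <stdint.h>
	#include <stddef.h>

	static long getcpu_sketch(unsigned int *cpup, unsigned int *nodep,
				  unsigned int cpu,
				  const uint16_t *cpu_to_node_map)
	{
		if (cpup)			/* p6: r32 != 0 */
			*cpup = cpu;		/* (p6) st4 [r32] = r3 */
		if (nodep)			/* p7: r33 != 0 */
			/* ld2 from cpu_to_node_map + 2*cpu; 0 if !CONFIG_NUMA */
			*nodep = cpu_to_node_map ? cpu_to_node_map[cpu] : 0;
		return 0;			/* mov r8=0 */
	}
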
-
-ENTRY(fsys_fallback_syscall)
- .prologue
- .altrp b6
- .body
- /*
- * We only get here from light-weight syscall handlers. Thus, we already
- * know that r15 contains a valid syscall number. No need to re-check.
- */
- adds r17=-1024,r15
- movl r14=sys_call_table
- ;;
- RSM_PSR_I(p0, r26, r27)
- shladd r18=r17,3,r14
- ;;
- ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
- MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency)
- mov r27=ar.rsc
- mov r21=ar.fpsr
- mov r26=ar.pfs
-END(fsys_fallback_syscall)
- /* FALL THROUGH */
-GLOBAL_ENTRY(fsys_bubble_down)
- .prologue
- .altrp b6
- .body
- /*
- * We get here for syscalls that don't have a lightweight
- * handler. For those, we need to bubble down into the kernel
- * and that requires setting up a minimal pt_regs structure,
- * and initializing the CPU state more or less as if an
- * interruption had occurred. To make syscall-restarts work,
-	 * we set up pt_regs such that cr_iip points to the second
-	 * instruction in syscall_via_break. Decrementing the IP
-	 * therefore restarts the syscall via break, while leaving
-	 * the IP unchanged returns us to the caller, as usual.
- * Note that we preserve the value of psr.pp rather than
- * initializing it from dcr.pp. This makes it possible to
- * distinguish fsyscall execution from other privileged
- * execution.
- *
- * On entry:
- * - normal fsyscall handler register usage, except
- * that we also have:
- * - r18: address of syscall entry point
- * - r21: ar.fpsr
- * - r26: ar.pfs
- * - r27: ar.rsc
- * - r29: psr
- *
- * We used to clear some PSR bits here but that requires slow
- * serialization. Fortunately, that isn't really necessary.
- * The rationale is as follows: we used to clear bits
- * ~PSR_PRESERVED_BITS in PSR.L. Since
- * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
- * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}.
- * However,
- *
- * PSR.BE : already is turned off in __kernel_syscall_via_epc()
- * PSR.AC : don't care (kernel normally turns PSR.AC on)
- * PSR.I : already turned off by the time fsys_bubble_down gets
- * invoked
- * PSR.DFL: always 0 (kernel never turns it on)
- * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
- * initiative
- * PSR.DI : always 0 (kernel never turns it on)
- * PSR.SI : always 0 (kernel never turns it on)
- * PSR.DB : don't care --- kernel never enables kernel-level
- * breakpoints
- * PSR.TB : must be 0 already; if it wasn't zero on entry to
- * __kernel_syscall_via_epc, the branch to fsys_bubble_down
- * will trigger a taken branch; the taken-trap-handler then
- * converts the syscall into a break-based system-call.
- */
- /*
- * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc.
- * The rest we have to synthesize.
- */
-# define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) \
- | (0x1 << IA64_PSR_RI_BIT) \
- | IA64_PSR_BN | IA64_PSR_I)
-
- invala // M0|1
- movl r14=ia64_ret_from_syscall // X
-
- nop.m 0
- movl r28=__kernel_syscall_via_break // X create cr.iip
- ;;
-
- mov r2=r16 // A get task addr to addl-addressable register
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
- mov r31=pr // I0 save pr (2 cyc)
- ;;
- st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
- addl r22=IA64_RBS_OFFSET,r2 // A compute base of RBS
- add r3=TI_FLAGS+IA64_TASK_SIZE,r2 // A
- ;;
- ld4 r3=[r3] // M0|1 r3 = current_thread_info()->flags
- lfetch.fault.excl.nt1 [r22] // M0|1 prefetch register backing-store
- nop.i 0
- ;;
- mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
-#else
- nop.m 0
-#endif
- nop.i 0
- ;;
- mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore
- mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!)
- nop.i 0
- ;;
- mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS
- movl r8=PSR_ONE_BITS // X
- ;;
- mov r25=ar.unat // M2 (5 cyc) save ar.unat
- mov r19=b6 // I0 save b6 (2 cyc)
- mov r20=r1 // A save caller's gp in r20
- ;;
- or r29=r8,r29 // A construct cr.ipsr value to save
- mov b6=r18 // I0 copy syscall entry-point to b6 (7 cyc)
- addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack
-
- mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc)
- cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
- br.call.sptk.many b7=ia64_syscall_setup // B
- ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- // mov.m r30=ar.itc is called in advance
- add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
- add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
- ;;
- ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
- ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at leave kernel
- ;;
- ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
- ld8 r21=[r17] // cumulated utime
- sub r22=r19,r18 // stime before leave kernel
- ;;
- st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // update stamp
- sub r18=r30,r19 // elapsed time in user mode
- ;;
- add r20=r20,r22 // sum stime
- add r21=r21,r18 // sum utime
- ;;
- st8 [r16]=r20 // update stime
- st8 [r17]=r21 // update utime
- ;;
-#endif
- mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
- mov rp=r14 // I0 set the real return addr
- and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
- ;;
- SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs
- cmp.eq p8,p0=r3,r0 // A
-(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
-
- nop.m 0
-(p8) br.call.sptk.many b6=b6 // B (ignore return address)
- br.cond.spnt ia64_trace_syscall // B
-END(fsys_bubble_down)
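
The CONFIG_VIRT_CPU_ACCOUNTING_NATIVE block above splits the elapsed
cycles at the last kernel exit: stamp-to-leave is system time,
leave-to-now is user time. A sketch with assumed field names mirroring
the thread_info ac_* members:

	#include <stdint.h>

	struct ti_acct_sketch {
		uint64_t ac_stamp;		/* time at last check in kernel */
		uint64_t ac_leave;		/* time at leaving the kernel */
		uint64_t ac_stime, ac_utime;	/* cumulated system/user time */
	};

	static void account_kernel_entry(struct ti_acct_sketch *ti, uint64_t now)
	{
		ti->ac_stime += ti->ac_leave - ti->ac_stamp;
		ti->ac_utime += now - ti->ac_leave;
		ti->ac_stamp  = now;		/* st8 [r16]=r30 */
	}
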
-
- .rodata
- .align 8
- .globl fsyscall_table
-
- data8 fsys_bubble_down
-fsyscall_table:
- data8 fsys_ni_syscall
- data8 0 // exit // 1025
- data8 0 // read
- data8 0 // write
- data8 0 // open
- data8 0 // close
- data8 0 // creat // 1030
- data8 0 // link
- data8 0 // unlink
- data8 0 // execve
- data8 0 // chdir
- data8 0 // fchdir // 1035
- data8 0 // utimes
- data8 0 // mknod
- data8 0 // chmod
- data8 0 // chown
- data8 0 // lseek // 1040
- data8 fsys_getpid // getpid
- data8 0 // getppid
- data8 0 // mount
- data8 0 // umount
- data8 0 // setuid // 1045
- data8 0 // getuid
- data8 0 // geteuid
- data8 0 // ptrace
- data8 0 // access
- data8 0 // sync // 1050
- data8 0 // fsync
- data8 0 // fdatasync
- data8 0 // kill
- data8 0 // rename
- data8 0 // mkdir // 1055
- data8 0 // rmdir
- data8 0 // dup
- data8 0 // pipe
- data8 0 // times
- data8 0 // brk // 1060
- data8 0 // setgid
- data8 0 // getgid
- data8 0 // getegid
- data8 0 // acct
- data8 0 // ioctl // 1065
- data8 0 // fcntl
- data8 0 // umask
- data8 0 // chroot
- data8 0 // ustat
- data8 0 // dup2 // 1070
- data8 0 // setreuid
- data8 0 // setregid
- data8 0 // getresuid
- data8 0 // setresuid
- data8 0 // getresgid // 1075
- data8 0 // setresgid
- data8 0 // getgroups
- data8 0 // setgroups
- data8 0 // getpgid
- data8 0 // setpgid // 1080
- data8 0 // setsid
- data8 0 // getsid
- data8 0 // sethostname
- data8 0 // setrlimit
- data8 0 // getrlimit // 1085
- data8 0 // getrusage
- data8 fsys_gettimeofday // gettimeofday
- data8 0 // settimeofday
- data8 0 // select
- data8 0 // poll // 1090
- data8 0 // symlink
- data8 0 // readlink
- data8 0 // uselib
- data8 0 // swapon
- data8 0 // swapoff // 1095
- data8 0 // reboot
- data8 0 // truncate
- data8 0 // ftruncate
- data8 0 // fchmod
- data8 0 // fchown // 1100
- data8 0 // getpriority
- data8 0 // setpriority
- data8 0 // statfs
- data8 0 // fstatfs
- data8 0 // gettid // 1105
- data8 0 // semget
- data8 0 // semop
- data8 0 // semctl
- data8 0 // msgget
- data8 0 // msgsnd // 1110
- data8 0 // msgrcv
- data8 0 // msgctl
- data8 0 // shmget
- data8 0 // shmat
- data8 0 // shmdt // 1115
- data8 0 // shmctl
- data8 0 // syslog
- data8 0 // setitimer
- data8 0 // getitimer
- data8 0 // 1120
- data8 0
- data8 0
- data8 0 // vhangup
- data8 0 // lchown
- data8 0 // remap_file_pages // 1125
- data8 0 // wait4
- data8 0 // sysinfo
- data8 0 // clone
- data8 0 // setdomainname
- data8 0 // newuname // 1130
- data8 0 // adjtimex
- data8 0
- data8 0 // init_module
- data8 0 // delete_module
- data8 0 // 1135
- data8 0
- data8 0 // quotactl
- data8 0 // bdflush
- data8 0 // sysfs
- data8 0 // personality // 1140
- data8 0 // afs_syscall
- data8 0 // setfsuid
- data8 0 // setfsgid
- data8 0 // getdents
- data8 0 // flock // 1145
- data8 0 // readv
- data8 0 // writev
- data8 0 // pread64
- data8 0 // pwrite64
- data8 0 // sysctl // 1150
- data8 0 // mmap
- data8 0 // munmap
- data8 0 // mlock
- data8 0 // mlockall
- data8 0 // mprotect // 1155
- data8 0 // mremap
- data8 0 // msync
- data8 0 // munlock
- data8 0 // munlockall
- data8 0 // sched_getparam // 1160
- data8 0 // sched_setparam
- data8 0 // sched_getscheduler
- data8 0 // sched_setscheduler
- data8 0 // sched_yield
- data8 0 // sched_get_priority_max // 1165
- data8 0 // sched_get_priority_min
- data8 0 // sched_rr_get_interval
- data8 0 // nanosleep
- data8 0 // nfsservctl
- data8 0 // prctl // 1170
- data8 0 // getpagesize
- data8 0 // mmap2
- data8 0 // pciconfig_read
- data8 0 // pciconfig_write
- data8 0 // perfmonctl // 1175
- data8 0 // sigaltstack
- data8 0 // rt_sigaction
- data8 0 // rt_sigpending
- data8 0 // rt_sigprocmask
- data8 0 // rt_sigqueueinfo // 1180
- data8 0 // rt_sigreturn
- data8 0 // rt_sigsuspend
- data8 0 // rt_sigtimedwait
- data8 0 // getcwd
- data8 0 // capget // 1185
- data8 0 // capset
- data8 0 // sendfile
- data8 0
- data8 0
- data8 0 // socket // 1190
- data8 0 // bind
- data8 0 // connect
- data8 0 // listen
- data8 0 // accept
- data8 0 // getsockname // 1195
- data8 0 // getpeername
- data8 0 // socketpair
- data8 0 // send
- data8 0 // sendto
- data8 0 // recv // 1200
- data8 0 // recvfrom
- data8 0 // shutdown
- data8 0 // setsockopt
- data8 0 // getsockopt
- data8 0 // sendmsg // 1205
- data8 0 // recvmsg
- data8 0 // pivot_root
- data8 0 // mincore
- data8 0 // madvise
- data8 0 // newstat // 1210
- data8 0 // newlstat
- data8 0 // newfstat
- data8 0 // clone2
- data8 0 // getdents64
- data8 0 // getunwind // 1215
- data8 0 // readahead
- data8 0 // setxattr
- data8 0 // lsetxattr
- data8 0 // fsetxattr
- data8 0 // getxattr // 1220
- data8 0 // lgetxattr
- data8 0 // fgetxattr
- data8 0 // listxattr
- data8 0 // llistxattr
- data8 0 // flistxattr // 1225
- data8 0 // removexattr
- data8 0 // lremovexattr
- data8 0 // fremovexattr
- data8 0 // tkill
- data8 0 // futex // 1230
- data8 0 // sched_setaffinity
- data8 0 // sched_getaffinity
- data8 fsys_set_tid_address // set_tid_address
- data8 0 // fadvise64_64
- data8 0 // tgkill // 1235
- data8 0 // exit_group
- data8 0 // lookup_dcookie
- data8 0 // io_setup
- data8 0 // io_destroy
- data8 0 // io_getevents // 1240
- data8 0 // io_submit
- data8 0 // io_cancel
- data8 0 // epoll_create
- data8 0 // epoll_ctl
- data8 0 // epoll_wait // 1245
- data8 0 // restart_syscall
- data8 0 // semtimedop
- data8 0 // timer_create
- data8 0 // timer_settime
- data8 0 // timer_gettime // 1250
- data8 0 // timer_getoverrun
- data8 0 // timer_delete
- data8 0 // clock_settime
- data8 fsys_clock_gettime // clock_gettime
- data8 0 // clock_getres // 1255
- data8 0 // clock_nanosleep
- data8 0 // fstatfs64
- data8 0 // statfs64
- data8 0 // mbind
- data8 0 // get_mempolicy // 1260
- data8 0 // set_mempolicy
- data8 0 // mq_open
- data8 0 // mq_unlink
- data8 0 // mq_timedsend
- data8 0 // mq_timedreceive // 1265
- data8 0 // mq_notify
- data8 0 // mq_getsetattr
- data8 0 // kexec_load
- data8 0 // vserver
- data8 0 // waitid // 1270
- data8 0 // add_key
- data8 0 // request_key
- data8 0 // keyctl
- data8 0 // ioprio_set
- data8 0 // ioprio_get // 1275
- data8 0 // move_pages
- data8 0 // inotify_init
- data8 0 // inotify_add_watch
- data8 0 // inotify_rm_watch
- data8 0 // migrate_pages // 1280
- data8 0 // openat
- data8 0 // mkdirat
- data8 0 // mknodat
- data8 0 // fchownat
- data8 0 // futimesat // 1285
- data8 0 // newfstatat
- data8 0 // unlinkat
- data8 0 // renameat
- data8 0 // linkat
- data8 0 // symlinkat // 1290
- data8 0 // readlinkat
- data8 0 // fchmodat
- data8 0 // faccessat
- data8 0
- data8 0 // 1295
- data8 0 // unshare
- data8 0 // splice
- data8 0 // set_robust_list
- data8 0 // get_robust_list
- data8 0 // sync_file_range // 1300
- data8 0 // tee
- data8 0 // vmsplice
- data8 0
- data8 fsys_getcpu // getcpu // 1304
-
- // fill in zeros for the remaining entries
- .zero:
- .space fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
deleted file mode 100644
index cc28614459..0000000000
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
- * Contributed by Peter Keilty <peter.keilty@hp.com>
- *
- * fsyscall gettimeofday data
- */
-
-/* like timespec, but includes "shifted nanoseconds" */
-struct time_sn_spec {
- u64 sec;
- u64 snsec;
-};
-
-struct fsyscall_gtod_data_t {
- seqcount_t seq;
- struct time_sn_spec wall_time;
- struct time_sn_spec monotonic_time;
- u64 clk_mask;
- u32 clk_mult;
- u32 clk_shift;
- void *clk_fsys_mmio;
- u64 clk_cycle_last;
-} ____cacheline_aligned;
-
-struct itc_jitter_data_t {
- int itc_jitter;
- u64 itc_lastcycle;
-} ____cacheline_aligned;
-
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
deleted file mode 100644
index d6360fd404..0000000000
--- a/arch/ia64/kernel/ftrace.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Dynamic function tracing support.
- *
- * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
- *
- * For licensing details, see COPYING.
- *
- * Defines low-level handling of mcount calls when the kernel
- * is compiled with the -pg flag. When using dynamic ftrace, the
- * mcount call-sites get patched lazily with NOPs until they are
- * enabled. All code mutation routines here take effect atomically.
- */
-
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-
-#include <asm/cacheflush.h>
-#include <asm/patch.h>
-
-/* On IA64, compiling with -pg adds the two bundles below at the start of each function */
-static unsigned char __attribute__((aligned(8)))
-ftrace_orig_code[MCOUNT_INSN_SIZE] = {
- 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
- 0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
- 0x05, 0x00, 0xc4, 0x00, /* mov r42=b0 */
- 0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
- 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
- 0x08, 0x00, 0x00, 0x50 /* br.call.sptk.many b0 = _mcount;; */
-};
-
-struct ftrace_orig_insn {
- u64 dummy1, dummy2, dummy3;
- u64 dummy4:64-41+13;
- u64 imm20:20;
- u64 dummy5:3;
- u64 sign:1;
- u64 dummy6:4;
-};
-
-/* the mcount stub is converted to the NOP sequence below */
-static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
- 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
- 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
- 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
- 0x00, 0x00, 0x04, 0x00
-};
-
-static unsigned char *ftrace_nop_replace(void)
-{
- return ftrace_nop_code;
-}
-
-/*
- * The mcount stub is converted to the call sequence below.
- * Note: only the last instruction differs from the NOP sequence.
- */
-static unsigned char __attribute__((aligned(8)))
-ftrace_call_code[MCOUNT_INSN_SIZE] = {
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
- 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
- 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
- 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
- 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
- 0xf8, 0xff, 0xff, 0xc8
-};
-
-struct ftrace_call_insn {
- u64 dummy1, dummy2;
- u64 dummy3:48;
- u64 imm39_l:16;
- u64 imm39_h:23;
- u64 dummy4:13;
- u64 imm20:20;
- u64 dummy5:3;
- u64 i:1;
- u64 dummy6:4;
-};
-
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
-{
- struct ftrace_call_insn *code = (void *)ftrace_call_code;
- unsigned long offset = addr - (ip + 0x10);
-
- code->imm39_l = offset >> 24;
- code->imm39_h = offset >> 40;
- code->imm20 = offset >> 4;
- code->i = offset >> 63;
- return ftrace_call_code;
-}
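
The brl displacement is relative to the bundle after the stub (ip + 0x10)
and is scattered across the MLX bundle. A hypothetical decoder, inverting
ftrace_call_replace() with the same struct ftrace_call_insn bit-fields,
shows where each slice lands (bits 0-3 are zero for bundle-aligned
targets):

	static unsigned long decode_brl_offset(const struct ftrace_call_insn *insn)
	{
		unsigned long offset;

		offset  = (unsigned long)insn->imm20 << 4;	/* bits  4..23 */
		offset |= (unsigned long)insn->imm39_l << 24;	/* bits 24..39 */
		offset |= (unsigned long)insn->imm39_h << 40;	/* bits 40..62 */
		offset |= (unsigned long)insn->i << 63;		/* sign bit    */
		return offset;			/* == addr - (ip + 0x10) */
	}
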
-
-static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code, int do_check)
-{
- unsigned char replaced[MCOUNT_INSN_SIZE];
-
- /*
- * Note:
-	 * We are paranoid about modifying text: if a bug were to happen, it
-	 * could cause us to read or write someplace that could cause harm.
- * Carefully read and modify the code with probe_kernel_*(), and make
- * sure what we read is what we expected it to be before modifying it.
- */
-
- if (!do_check)
- goto skip_check;
-
- /* read the text we want to modify */
- if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure it is what we expect it to be */
- if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
- return -EINVAL;
-
-skip_check:
- /* replace the text with the new text */
- if (copy_to_kernel_nofault(((void *)ip), new_code, MCOUNT_INSN_SIZE))
- return -EPERM;
- flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
-
- return 0;
-}
-
-static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
- unsigned long ip = rec->ip;
-
- if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
- if (rec->flags & FTRACE_FL_CONVERTED) {
- struct ftrace_call_insn *call_insn, *tmp_call;
-
- call_insn = (void *)ftrace_call_code;
- tmp_call = (void *)replaced;
- call_insn->imm39_l = tmp_call->imm39_l;
- call_insn->imm39_h = tmp_call->imm39_h;
- call_insn->imm20 = tmp_call->imm20;
- call_insn->i = tmp_call->i;
- if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
- return -EINVAL;
- return 0;
- } else {
- struct ftrace_orig_insn *call_insn, *tmp_call;
-
- call_insn = (void *)ftrace_orig_code;
- tmp_call = (void *)replaced;
- call_insn->sign = tmp_call->sign;
- call_insn->imm20 = tmp_call->imm20;
- if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
- return -EINVAL;
- return 0;
- }
-}
-
-int ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- int ret;
- char *new;
-
- ret = ftrace_make_nop_check(rec, addr);
- if (ret)
- return ret;
- new = ftrace_nop_replace();
- return ftrace_modify_code(rec->ip, NULL, new, 0);
-}
-
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long ip = rec->ip;
- unsigned char *old, *new;
-
-	old = ftrace_nop_replace();
- new = ftrace_call_replace(ip, addr);
- return ftrace_modify_code(ip, old, new, 1);
-}
-
-/* on IA64, _mcount can't call ftrace_stub directly; only a jump works */
-int ftrace_update_ftrace_func(ftrace_func_t func)
-{
- unsigned long ip;
- unsigned long addr = ((struct fnptr *)ftrace_call)->ip;
-
- if (func == ftrace_stub)
- return 0;
- ip = ((struct fnptr *)func)->ip;
-
- ia64_patch_imm64(addr + 2, ip);
-
- flush_icache_range(addr, addr + 16);
- return 0;
-}
diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
deleted file mode 100644
index b3ef1c72e1..0000000000
--- a/arch/ia64/kernel/gate-data.S
+++ /dev/null
@@ -1,3 +0,0 @@
- .section .data..gate, "aw"
-
- .incbin "arch/ia64/kernel/gate.so"
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
deleted file mode 100644
index 9f235cd551..0000000000
--- a/arch/ia64/kernel/gate.S
+++ /dev/null
@@ -1,380 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the code that gets mapped at the upper end of each task's text
- * region. For now, it contains the signal trampoline code only.
- *
- * Copyright (C) 1999-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/sigcontext.h>
-#include <asm/unistd.h>
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/native/inst.h>
-
-/*
- * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
- * complications with the linker (which likes to create PLT stubs for branches
- * to targets outside the shared object), and multi-phase kernel builds, we
- * simply create minimalistic "patch lists" in special ELF sections.
- */
- .section ".data..patch.fsyscall_table", "a"
- .previous
-#define LOAD_FSYSCALL_TABLE(reg) \
-[1:] movl reg=0; \
- .xdata4 ".data..patch.fsyscall_table", 1b-.
-
- .section ".data..patch.brl_fsys_bubble_down", "a"
- .previous
-#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
-[1:](pr)brl.cond.sptk 0; \
- ;; \
- .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
-
-GLOBAL_ENTRY(__kernel_syscall_via_break)
- .prologue
- .altrp b6
- .body
- /*
- * Note: for (fast) syscall restart to work, the break instruction must be
- * the first one in the bundle addressed by syscall_via_break.
- */
-{ .mib
- break 0x100000
- nop.i 0
- br.ret.sptk.many b6
-}
-END(__kernel_syscall_via_break)
-
-# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
-# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
-# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
-# define SIGHANDLER_OFF (16 + IA64_SIGFRAME_HANDLER_OFFSET)
-# define SIGCONTEXT_OFF (16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET)
-
-# define FLAGS_OFF IA64_SIGCONTEXT_FLAGS_OFFSET
-# define CFM_OFF IA64_SIGCONTEXT_CFM_OFFSET
-# define FR6_OFF IA64_SIGCONTEXT_FR6_OFFSET
-# define BSP_OFF IA64_SIGCONTEXT_AR_BSP_OFFSET
-# define RNAT_OFF IA64_SIGCONTEXT_AR_RNAT_OFFSET
-# define UNAT_OFF IA64_SIGCONTEXT_AR_UNAT_OFFSET
-# define FPSR_OFF IA64_SIGCONTEXT_AR_FPSR_OFFSET
-# define PR_OFF IA64_SIGCONTEXT_PR_OFFSET
-# define RP_OFF IA64_SIGCONTEXT_IP_OFFSET
-# define SP_OFF IA64_SIGCONTEXT_R12_OFFSET
-# define RBS_BASE_OFF IA64_SIGCONTEXT_RBS_BASE_OFFSET
-# define LOADRS_OFF IA64_SIGCONTEXT_LOADRS_OFFSET
-# define base0 r2
-# define base1 r3
- /*
- * When we get here, the memory stack looks like this:
- *
- * +===============================+
- * | |
- * // struct sigframe //
- * | |
- * +-------------------------------+ <-- sp+16
- * | 16 byte of scratch |
- * | space |
- * +-------------------------------+ <-- sp
- *
- * The register stack looks _exactly_ the way it looked at the time the signal
- * occurred. In other words, we're treading on a potential mine-field: each
- * incoming general register may be a NaT value (including sp, in which case the
- * process ends up dying with a SIGSEGV).
- *
-	 * The first thing we need to do is a cover to get the registers onto the backing
- * store. Once that is done, we invoke the signal handler which may modify some
- * of the machine state. After returning from the signal handler, we return
- * control to the previous context by executing a sigreturn system call. A signal
- * handler may call the rt_sigreturn() function to directly return to a given
- * sigcontext. However, the user-level sigreturn() needs to do much more than
- * calling the rt_sigreturn() system call as it needs to unwind the stack to
- * restore preserved registers that may have been saved on the signal handler's
- * call stack.
- */
-
-#define SIGTRAMP_SAVES \
- .unwabi 3, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \
- .unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */ \
- .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \
- .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \
- .savesp pr, PR_OFF+SIGCONTEXT_OFF; \
- .savesp rp, RP_OFF+SIGCONTEXT_OFF; \
- .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \
- .vframesp SP_OFF+SIGCONTEXT_OFF
-
-GLOBAL_ENTRY(__kernel_sigtramp)
- // describe the state that is active when we get here:
- .prologue
- SIGTRAMP_SAVES
- .body
-
- .label_state 1
-
- adds base0=SIGHANDLER_OFF,sp
- adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp
- br.call.sptk.many rp=1f
-1:
- ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF) // get pointer to signal handler's plabel
- ld8 r15=[base1] // get address of new RBS base (or NULL)
- cover // push args in interrupted frame onto backing store
- ;;
- cmp.ne p1,p0=r15,r0 // do we need to switch rbs? (note: pr is saved by kernel)
- mov.m r9=ar.bsp // fetch ar.bsp
- .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
-(p1) br.cond.spnt setup_rbs // yup -> (clobbers p8, r14-r16, and r18-r20)
-back_from_setup_rbs:
- alloc r8=ar.pfs,0,0,3,0
- ld8 out0=[base0],16 // load arg0 (signum)
- adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1
- ;;
- ld8 out1=[base1] // load arg1 (siginfop)
- ld8 r10=[r17],8 // get signal handler entry point
- ;;
- ld8 out2=[base0] // load arg2 (sigcontextp)
- ld8 gp=[r17] // get signal handler's global pointer
- adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
- ;;
- .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
- st8 [base0]=r9 // save sc_ar_bsp
- adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
- adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
- ;;
- stf.spill [base0]=f6,32
- stf.spill [base1]=f7,32
- ;;
- stf.spill [base0]=f8,32
- stf.spill [base1]=f9,32
- mov b6=r10
- ;;
- stf.spill [base0]=f10,32
- stf.spill [base1]=f11,32
- ;;
- stf.spill [base0]=f12,32
- stf.spill [base1]=f13,32
- ;;
- stf.spill [base0]=f14,32
- stf.spill [base1]=f15,32
- br.call.sptk.many rp=b6 // call the signal handler
-.ret0: adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
- ;;
- ld8 r15=[base0] // fetch sc_ar_bsp
- mov r14=ar.bsp
- ;;
- cmp.ne p1,p0=r14,r15 // do we need to restore the rbs?
-(p1) br.cond.spnt restore_rbs // yup -> (clobbers r14-r18, f6 & f7)
- ;;
-back_from_restore_rbs:
- adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
- adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
- ;;
- ldf.fill f6=[base0],32
- ldf.fill f7=[base1],32
- ;;
- ldf.fill f8=[base0],32
- ldf.fill f9=[base1],32
- ;;
- ldf.fill f10=[base0],32
- ldf.fill f11=[base1],32
- ;;
- ldf.fill f12=[base0],32
- ldf.fill f13=[base1],32
- ;;
- ldf.fill f14=[base0],32
- ldf.fill f15=[base1],32
- mov r15=__NR_rt_sigreturn
- .restore sp // pop .prologue
- break __BREAK_SYSCALL
-
- .prologue
- SIGTRAMP_SAVES
-setup_rbs:
- mov ar.rsc=0 // put RSE into enforced lazy mode
- ;;
- .save ar.rnat, r19
- mov r19=ar.rnat // save RNaT before switching backing store area
- adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp
-
- mov r18=ar.bspstore
- mov ar.bspstore=r15 // switch over to new register backing store area
- ;;
-
- .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
- st8 [r14]=r19 // save sc_ar_rnat
- .body
- mov.m r16=ar.bsp // sc_loadrs <- (new bsp - new bspstore) << 16
- adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp
- ;;
- invala
- sub r15=r16,r15
- extr.u r20=r18,3,6
- ;;
- mov ar.rsc=0xf // set RSE into eager mode, pl 3
- cmp.eq p8,p0=63,r20
- shl r15=r15,16
- ;;
- st8 [r14]=r15 // save sc_loadrs
-(p8) st8 [r18]=r19 // if bspstore points at RNaT slot, store RNaT there now
- .restore sp // pop .prologue
- br.cond.sptk back_from_setup_rbs
-
- .prologue
- SIGTRAMP_SAVES
- .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
- .body
-restore_rbs:
- // On input:
- // r14 = bsp1 (bsp at the time of return from signal handler)
- // r15 = bsp0 (bsp at the time the signal occurred)
- //
- // Here, we need to calculate bspstore0, the value that ar.bspstore needs
- // to be set to, based on bsp0 and the size of the dirty partition on
- // the alternate stack (sc_loadrs >> 16). This can be done with the
- // following algorithm:
- //
- // bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1));
- //
- // This is what the code below does.
- //
- alloc r2=ar.pfs,0,0,0,0 // alloc null frame
- adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp
- adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp
- ;;
- ld8 r17=[r16]
- ld8 r16=[r18] // get new rnat
- extr.u r18=r15,3,6 // r18 <- rse_slot_num(bsp0)
- ;;
- mov ar.rsc=r17 // put RSE into enforced lazy mode
- shr.u r17=r17,16
- ;;
- sub r14=r14,r17 // r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16)
- shr.u r17=r17,3 // r17 <- (sc_loadrs >> 19)
- ;;
- loadrs // restore dirty partition
- extr.u r14=r14,3,6 // r14 <- rse_slot_num(bspstore1)
- ;;
- add r14=r14,r17 // r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19)
- ;;
- shr.u r14=r14,6 // r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40
- ;;
- sub r14=r14,r17 // r14 <- -rse_num_regs(bspstore1, bsp1)
- movl r17=0x8208208208208209
- ;;
- add r18=r18,r14 // r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1)
- setf.sig f7=r17
- cmp.lt p7,p0=r14,r0 // p7 <- (r14 < 0)?
- ;;
-(p7) adds r18=-62,r18 // delta -= 62
- ;;
- setf.sig f6=r18
- ;;
- xmpy.h f6=f6,f7
- ;;
- getf.sig r17=f6
- ;;
- add r17=r17,r18
- shr r18=r18,63
- ;;
- shr r17=r17,5
- ;;
- sub r17=r17,r18 // r17 = delta/63
- ;;
- add r17=r14,r17 // r17 <- delta/63 - rse_num_regs(bspstore1, bsp1)
- ;;
- shladd r15=r17,3,r15 // r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1))
- ;;
- mov ar.bspstore=r15 // switch back to old register backing store area
- ;;
- mov ar.rnat=r16 // restore RNaT
- mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
- // invala not necessary as that will happen when returning to user-mode
- br.cond.sptk back_from_restore_rbs
-END(__kernel_sigtramp)
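
The movl constant above, 0x8208208208208209, is ceil(2^69 / 63): the
magic multiplier for a divide by 63 (one slot in every 64 on the backing
store holds RNaT bits, hence the groups of 63 registers). The
xmpy.h/getf.sig pair supplies the signed high 64 bits of the product, and
the add/shift/correct sequence yields a truncating delta/63. A sanity
sketch, assuming GCC/Clang __int128 and arithmetic right shifts:

	#include <stdint.h>

	static int64_t div63_sketch(int64_t d)
	{
		int64_t m = (int64_t)0x8208208208208209ULL; /* negative as signed */
		int64_t hi = (int64_t)(((__int128)d * m) >> 64); /* xmpy.h */

		/* add delta; shift by 5; subtract sign (d >> 63 is -1 if d < 0) */
		return ((hi + d) >> 5) - (d >> 63);
	}
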
-
-/*
- * On entry:
- * r11 = saved ar.pfs
- * r15 = system call #
- * b0 = saved return address
- * b6 = return address
- * On exit:
- * r11 = saved ar.pfs
- * r15 = system call #
- * b0 = saved return address
- * all other "scratch" registers: undefined
- * all "preserved" registers: same as on entry
- */
-
-GLOBAL_ENTRY(__kernel_syscall_via_epc)
- .prologue
- .altrp b6
- .body
-{
- /*
- * Note: the kernel cannot assume that the first two instructions in this
- * bundle get executed. The remaining code must be safe even if
- * they do not get executed.
- */
- adds r17=-1024,r15 // A
- mov r10=0 // A default to successful syscall execution
- epc // B causes split-issue
-}
- ;;
- RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d)
- LOAD_FSYSCALL_TABLE(r14) // X
- ;;
- mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
- shladd r18=r17,3,r14 // A
- mov r19=NR_syscalls-1 // A
- ;;
- lfetch [r18] // M0|1
- MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc)
- // If r17 is a NaT, p6 will be zero
- cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
- ;;
- mov r21=ar.fpsr // M2 (12 cyc)
- tnat.nz p10,p9=r15 // I0
- mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
- ;;
- srlz.d // M0 (forces split-issue) ensure PSR.BE==0
-(p6) ld8 r18=[r18] // M0|1
- nop.i 0
- ;;
- nop.m 0
-(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
- nop.i 0
- ;;
- SSM_PSR_I(p8, p14, r25)
-(p6) mov b7=r18 // I0
-(p8) br.dptk.many b7 // B
-
- mov r27=ar.rsc // M2 (12 cyc)
-/*
- * brl.cond doesn't work as intended because the linker would convert this branch
- * into a branch to a PLT. Perhaps there will be a way to avoid this with some
- * future version of the linker. In the meantime, we just use an indirect branch
- * instead.
- */
-#ifdef CONFIG_ITANIUM
-(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
- ;;
-(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
- ;;
-(p6) mov b7=r14
-(p6) br.sptk.many b7
-#else
- BRL_COND_FSYS_BUBBLE_DOWN(p6)
-#endif
- SSM_PSR_I(p0, p14, r10)
- mov r10=-1
-(p10) mov r8=EINVAL
-(p9) mov r8=ENOSYS
- FSYS_RETURN
-
-END(__kernel_syscall_via_epc)
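
The table dispatch above in C terms: the single unsigned compare
implements both range checks at once (and stays false when the syscall
number is a NaT), and the tbit test branches to entries whose bit 0 is
clear, everything else falling through to the brl into fsys_bubble_down.
A sketch of that reading; the explicit zero check is added here for
safety and is not something the assembly performs:

	#include <stdint.h>
	#include <stddef.h>

	typedef long (*fsys_handler_t)(void);

	static fsys_handler_t fsys_dispatch_sketch(uint64_t sysnr,
						   const uint64_t *fsyscall_table,
						   uint64_t nr_syscalls)
	{
		uint64_t idx = sysnr - 1024;	/* adds r17=-1024,r15 */
		uint64_t entry;

		if (idx > nr_syscalls - 1)	/* cmp.geu p6,p7=r19,r17 */
			return NULL;		/* heavy-weight path */
		entry = fsyscall_table[idx];	/* shladd; (p6) ld8 */
		if (!entry || (entry & 1))	/* tbit.z.unc p8,p0=r18,0 */
			return NULL;		/* fall into fsys_bubble_down */
		return (fsys_handler_t)entry;	/* (p8) br.dptk.many b7 */
	}
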
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
deleted file mode 100644
index 461c7e69d4..0000000000
--- a/arch/ia64/kernel/gate.lds.S
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Linker script for gate DSO. The gate pages are an ELF shared object
- * prelinked to its virtual address, with only one read-only segment and
- * one execute-only segment (both fit in one page). This script controls
- * its layout.
- */
-
-#include <asm/page.h>
-
-SECTIONS
-{
- . = GATE_ADDR + SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :readable
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note*) } :readable :note
-
- .dynamic : { *(.dynamic) } :readable :dynamic
-
- /*
- * This linker script is used both with -r and with -shared. For
- * the layouts to match, we need to skip more than enough space for
- * the dynamic symbol table et al. If this amount is insufficient,
- * ld -shared will barf. Just increase it here.
- */
- . = GATE_ADDR + 0x600;
-
- .data..patch : {
- __start_gate_mckinley_e9_patchlist = .;
- *(.data..patch.mckinley_e9)
- __end_gate_mckinley_e9_patchlist = .;
-
- __start_gate_vtop_patchlist = .;
- *(.data..patch.vtop)
- __end_gate_vtop_patchlist = .;
-
- __start_gate_fsyscall_patchlist = .;
- *(.data..patch.fsyscall_table)
- __end_gate_fsyscall_patchlist = .;
-
- __start_gate_brl_fsys_bubble_down_patchlist = .;
- *(.data..patch.brl_fsys_bubble_down)
- __end_gate_brl_fsys_bubble_down_patchlist = .;
- } :readable
-
- .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
- .IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind
-#ifdef HAVE_BUGGY_SEGREL
- .text (GATE_ADDR + PAGE_SIZE) : { *(.text) *(.text.*) } :readable
-#else
- . = ALIGN(PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1));
- .text : { *(.text) *(.text.*) } :epc
-#endif
-
- /DISCARD/ : {
- *(.got.plt) *(.got)
- *(.data .data.* .gnu.linkonce.d.*)
- *(.dynbss)
- *(.bss .bss.* .gnu.linkonce.b.*)
- *(__ex_table)
- *(__mca_table)
- }
-}
-
-/*
- * ld does not recognize this name token; use the constant.
- */
-#define PT_IA_64_UNWIND 0x70000001
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
- readable PT_LOAD FILEHDR PHDRS FLAGS(4); /* PF_R */
-#ifndef HAVE_BUGGY_SEGREL
- epc PT_LOAD FILEHDR PHDRS FLAGS(1); /* PF_X */
-#endif
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- unwind PT_IA_64_UNWIND;
-}
-
-/*
- * This controls what symbols we export from the DSO.
- */
-VERSION
-{
- LINUX_2.5 {
- global:
- __kernel_syscall_via_break;
- __kernel_syscall_via_epc;
- __kernel_sigtramp;
-
- local: *;
- };
-}
-
-/* The ELF entry point can be used to set the AT_SYSINFO value. */
-ENTRY(__kernel_syscall_via_epc)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
deleted file mode 100644
index 85c8a57da4..0000000000
--- a/arch/ia64/kernel/head.S
+++ /dev/null
@@ -1,1167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Here is where the ball gets rolling as far as the kernel is concerned.
- * When control is transferred to _start, the bootloader has already
- * loaded us to the correct address. All that's left to do here is
- * to set up the kernel's global pointer and jump to the kernel
- * entry point.
- *
- * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
- * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
- * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
- * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
- * Support for CPU Hotplug
- */
-
-#include <linux/export.h>
-#include <linux/pgtable.h>
-#include <asm/asmmacro.h>
-#include <asm/fpu.h>
-#include <asm/kregs.h>
-#include <asm/mmu_context.h>
-#include <asm/asm-offsets.h>
-#include <asm/pal.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/mca_asm.h>
-#include <linux/init.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define SAL_PSR_BITS_TO_SET \
- (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
-
-#define SAVE_FROM_REG(src, ptr, dest) \
- mov dest=src;; \
- st8 [ptr]=dest,0x08
-
-#define RESTORE_REG(reg, ptr, _tmp) \
- ld8 _tmp=[ptr],0x08;; \
- mov reg=_tmp
-
-#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
- mov _idx=0;; \
-1: \
- SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \
- add _idx=1,_idx;; \
- br.cloop.sptk.many 1b
-
-#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
- mov _idx=0;; \
-_lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \
- add _idx=1, _idx;; \
- br.cloop.sptk.many _lbl
-
-#define SAVE_ONE_RR(num, _reg, _tmp) \
- movl _tmp=(num<<61);; \
- mov _reg=rr[_tmp]
-
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- SAVE_ONE_RR(0,_r0, _tmp);; \
- SAVE_ONE_RR(1,_r1, _tmp);; \
- SAVE_ONE_RR(2,_r2, _tmp);; \
- SAVE_ONE_RR(3,_r3, _tmp);; \
- SAVE_ONE_RR(4,_r4, _tmp);; \
- SAVE_ONE_RR(5,_r5, _tmp);; \
- SAVE_ONE_RR(6,_r6, _tmp);; \
- SAVE_ONE_RR(7,_r7, _tmp);;
-
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- st8 [ptr]=_r0, 8;; \
- st8 [ptr]=_r1, 8;; \
- st8 [ptr]=_r2, 8;; \
- st8 [ptr]=_r3, 8;; \
- st8 [ptr]=_r4, 8;; \
- st8 [ptr]=_r5, 8;; \
- st8 [ptr]=_r6, 8;; \
- st8 [ptr]=_r7, 8;;
-
-#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
- mov ar.lc=0x08-1;; \
- movl _idx1=0x00;; \
-RestRR: \
- dep.z _idx2=_idx1,61,3;; \
- ld8 _tmp=[ptr],8;; \
- mov rr[_idx2]=_tmp;; \
- srlz.d;; \
- add _idx1=1,_idx1;; \
- br.cloop.sptk.few RestRR
-
-#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
- movl reg1=sal_state_for_booting_cpu;; \
- ld8 reg2=[reg1];;
-
-/*
- * Adjust region registers saved before starting to save
- * break regs and rest of the states that need to be preserved.
- */
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \
- SAVE_FROM_REG(b0,_reg1,_reg2);; \
- SAVE_FROM_REG(b1,_reg1,_reg2);; \
- SAVE_FROM_REG(b2,_reg1,_reg2);; \
- SAVE_FROM_REG(b3,_reg1,_reg2);; \
- SAVE_FROM_REG(b4,_reg1,_reg2);; \
- SAVE_FROM_REG(b5,_reg1,_reg2);; \
- st8 [_reg1]=r1,0x08;; \
- st8 [_reg1]=r12,0x08;; \
- st8 [_reg1]=r13,0x08;; \
- SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \
- st8 [_reg1]=r4,0x08;; \
- st8 [_reg1]=r5,0x08;; \
- st8 [_reg1]=r6,0x08;; \
- st8 [_reg1]=r7,0x08;; \
- st8 [_reg1]=_pred,0x08;; \
- SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \
- stf.spill.nta [_reg1]=f2,16;; \
- stf.spill.nta [_reg1]=f3,16;; \
- stf.spill.nta [_reg1]=f4,16;; \
- stf.spill.nta [_reg1]=f5,16;; \
- stf.spill.nta [_reg1]=f16,16;; \
- stf.spill.nta [_reg1]=f17,16;; \
- stf.spill.nta [_reg1]=f18,16;; \
- stf.spill.nta [_reg1]=f19,16;; \
- stf.spill.nta [_reg1]=f20,16;; \
- stf.spill.nta [_reg1]=f21,16;; \
- stf.spill.nta [_reg1]=f22,16;; \
- stf.spill.nta [_reg1]=f23,16;; \
- stf.spill.nta [_reg1]=f24,16;; \
- stf.spill.nta [_reg1]=f25,16;; \
- stf.spill.nta [_reg1]=f26,16;; \
- stf.spill.nta [_reg1]=f27,16;; \
- stf.spill.nta [_reg1]=f28,16;; \
- stf.spill.nta [_reg1]=f29,16;; \
- stf.spill.nta [_reg1]=f30,16;; \
- stf.spill.nta [_reg1]=f31,16;;
-
-#else
-#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#endif
-
-#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
- movl _tmp1=(num << 61);; \
- mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
- mov rr[_tmp1]=_tmp2
-
- __PAGE_ALIGNED_DATA
-
- .global empty_zero_page
-EXPORT_SYMBOL_GPL(empty_zero_page)
-empty_zero_page:
- .skip PAGE_SIZE
-
- .global swapper_pg_dir
-swapper_pg_dir:
- .skip PAGE_SIZE
-
- .rodata
-halt_msg:
- stringz "Halting kernel\n"
-
- __REF
-
- .global start_ap
-
- /*
- * Start the kernel. When the bootloader passes control to _start(), r28
- * points to the address of the boot parameter area. Execution reaches
- * here in physical mode.
- */
-GLOBAL_ENTRY(_start)
-start_ap:
- .prologue
- .save rp, r0 // terminate unwind chain with a NULL rp
- .body
-
- rsm psr.i | psr.ic
- ;;
- srlz.i
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- /*
-	 * Save the region registers and predicates before they get clobbered
- */
- SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
- mov r25=pr;;
-
- /*
- * Initialize kernel region registers:
- * rr[0]: VHPT enabled, page size = PAGE_SHIFT
- * rr[1]: VHPT enabled, page size = PAGE_SHIFT
- * rr[2]: VHPT enabled, page size = PAGE_SHIFT
- * rr[3]: VHPT enabled, page size = PAGE_SHIFT
- * rr[4]: VHPT enabled, page size = PAGE_SHIFT
- * rr[5]: VHPT enabled, page size = PAGE_SHIFT
- * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * We initialize all of them to prevent inadvertently assuming
- * something about the state of address translation early in boot.
- */
- SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
- SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
- /*
- * Now pin mappings into the TLB for kernel text and data
- */
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- mov r3=ip
- movl r18=PAGE_KERNEL
- ;;
- dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
- ;;
- or r18=r2,r18
- ;;
- srlz.i
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
-
- /*
- * Switch into virtual mode:
- */
- movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
- |IA64_PSR_DI)
- ;;
- mov cr.ipsr=r16
- movl r17=1f
- ;;
- mov cr.iip=r17
- mov cr.ifs=r0
- ;;
- rfi
- ;;
-1: // now we are in virtual mode
-
- SET_AREA_FOR_BOOTING_CPU(r2, r16);
-
- STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
- SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
- ;;
-
- // set IVT entry point---can't access I/O ports without it
- movl r3=ia64_ivt
- ;;
- mov cr.iva=r3
- movl r2=FPSR_DEFAULT
- ;;
- srlz.i
- movl gp=__gp
-
- mov ar.fpsr=r2
- ;;
-
-#define isAP p2 // are we an Application Processor?
-#define isBP p3 // are we the Bootstrap Processor?
-
-#ifdef CONFIG_SMP
- /*
- * Find the init_task for the currently booting CPU. At poweron, and in
- * UP mode, task_for_booting_cpu is NULL.
- */
- movl r3=task_for_booting_cpu
- ;;
- ld8 r3=[r3]
- movl r2=init_task
- ;;
- cmp.eq isBP,isAP=r3,r0
- ;;
-(isAP) mov r2=r3
-#else
- movl r2=init_task
- cmp.eq isBP,isAP=r0,r0
-#endif
- ;;
- tpa r3=r2 // r3 == phys addr of task struct
- mov r16=-1
-(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
-
- // load mapping for stack (virtaddr in r2, physaddr in r3)
- rsm psr.ic
- movl r17=PAGE_KERNEL
- ;;
- srlz.d
- dep r18=0,r3,0,12
- ;;
- or r18=r17,r18
- dep r2=-1,r3,61,3 // IMVA of task
- ;;
- mov r17=rr[r2]
- shr.u r16=r3,IA64_GRANULE_SHIFT
- ;;
- dep r17=0,r17,8,24
- ;;
- mov cr.itir=r17
- mov cr.ifa=r2
-
- mov r19=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r19]=r18
- ;;
- ssm psr.ic
- srlz.d
- ;;
-
-.load_current:
- // load the "current" pointer (r13) and ar.k6 with the current task
- mov IA64_KR(CURRENT)=r2 // virtual address
- mov IA64_KR(CURRENT_STACK)=r16
- mov r13=r2
- /*
- * Reserve space at the top of the stack for "struct pt_regs". Kernel
- * threads don't store interesting values in that structure, but the space
-	 * still needs to be there because time-critical stuff such as context
- * switching can be implemented more efficiently (for example, __switch_to()
- * always sets the psr.dfh bit of the task it is switching to).
- */
-
- addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
- addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE
- mov ar.rsc=0 // place RSE in enforced lazy mode
- ;;
- loadrs // clear the dirty partition
- movl r19=__phys_per_cpu_start
- mov r18=PERCPU_PAGE_SIZE
- ;;
-#ifndef CONFIG_SMP
- add r19=r19,r18
- ;;
-#else
-(isAP) br.few 2f
- movl r20=__cpu0_per_cpu
- ;;
- shr.u r18=r18,3
-1:
- ld8 r21=[r19],8;;
-	st8 [r20]=r21,8
- adds r18=-1,r18;;
- cmp4.lt p7,p6=0,r18
-(p7) br.cond.dptk.few 1b
- mov r19=r20
- ;;
-2:
-#endif
- tpa r19=r19
- ;;
- .pred.rel.mutex isBP,isAP
-(isBP) mov IA64_KR(PER_CPU_DATA)=r19 // per-CPU base for cpu0
-(isAP) mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base
- ;;
- mov ar.bspstore=r2 // establish the new RSE stack
- ;;
- mov ar.rsc=0x3 // place RSE in eager mode
-
-(isBP) dep r28=-1,r28,61,3 // make address virtual
-(isBP) movl r2=ia64_boot_param
- ;;
-(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
-
-#ifdef CONFIG_SMP
-(isAP) br.call.sptk.many rp=start_secondary
-.ret0:
-(isAP) br.cond.sptk self
-#endif
-
- // This is executed by the bootstrap processor (bsp) only:
-
- br.call.sptk.many rp=start_kernel
-.ret2: addl r3=@ltoff(halt_msg),gp
- ;;
- alloc r2=ar.pfs,8,0,2,0
- ;;
- ld8 out0=[r3]
- br.call.sptk.many b0=console_print
-
-self: hint @pause
- br.sptk.many self // endless loop
-END(_start)
-
- .text
-
-GLOBAL_ENTRY(ia64_save_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- mov r20=ar.lc // preserve ar.lc
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=0
- add r19=IA64_NUM_DBG_REGS*8,in0
- ;;
-1: mov r16=dbr[r18]
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d
-#endif
- mov r17=ibr[r18]
- add r18=1,r18
- ;;
- st8.nta [in0]=r16,8
- st8.nta [r19]=r17,8
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_save_debug_regs)
-
-GLOBAL_ENTRY(ia64_load_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- lfetch.nta [in0]
- mov r20=ar.lc // preserve ar.lc
- add r19=IA64_NUM_DBG_REGS*8,in0
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=-1
- ;;
-1: ld8.nta r16=[in0],8
- ld8.nta r17=[r19],8
- add r18=1,r18
- ;;
- mov dbr[r18]=r16
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d // Errata 132 (NoFix status)
-#endif
- mov ibr[r18]=r17
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_load_debug_regs)
-
-GLOBAL_ENTRY(__ia64_save_fpu)
- alloc r2=ar.pfs,1,4,0,0
- adds loc0=96*16-16,in0
- adds loc1=96*16-16-128,in0
- ;;
- stf.spill.nta [loc0]=f127,-256
- stf.spill.nta [loc1]=f119,-256
- ;;
- stf.spill.nta [loc0]=f111,-256
- stf.spill.nta [loc1]=f103,-256
- ;;
- stf.spill.nta [loc0]=f95,-256
- stf.spill.nta [loc1]=f87,-256
- ;;
- stf.spill.nta [loc0]=f79,-256
- stf.spill.nta [loc1]=f71,-256
- ;;
- stf.spill.nta [loc0]=f63,-256
- stf.spill.nta [loc1]=f55,-256
- adds loc2=96*16-32,in0
- ;;
- stf.spill.nta [loc0]=f47,-256
- stf.spill.nta [loc1]=f39,-256
- adds loc3=96*16-32-128,in0
- ;;
- stf.spill.nta [loc2]=f126,-256
- stf.spill.nta [loc3]=f118,-256
- ;;
- stf.spill.nta [loc2]=f110,-256
- stf.spill.nta [loc3]=f102,-256
- ;;
- stf.spill.nta [loc2]=f94,-256
- stf.spill.nta [loc3]=f86,-256
- ;;
- stf.spill.nta [loc2]=f78,-256
- stf.spill.nta [loc3]=f70,-256
- ;;
- stf.spill.nta [loc2]=f62,-256
- stf.spill.nta [loc3]=f54,-256
- adds loc0=96*16-48,in0
- ;;
- stf.spill.nta [loc2]=f46,-256
- stf.spill.nta [loc3]=f38,-256
- adds loc1=96*16-48-128,in0
- ;;
- stf.spill.nta [loc0]=f125,-256
- stf.spill.nta [loc1]=f117,-256
- ;;
- stf.spill.nta [loc0]=f109,-256
- stf.spill.nta [loc1]=f101,-256
- ;;
- stf.spill.nta [loc0]=f93,-256
- stf.spill.nta [loc1]=f85,-256
- ;;
- stf.spill.nta [loc0]=f77,-256
- stf.spill.nta [loc1]=f69,-256
- ;;
- stf.spill.nta [loc0]=f61,-256
- stf.spill.nta [loc1]=f53,-256
- adds loc2=96*16-64,in0
- ;;
- stf.spill.nta [loc0]=f45,-256
- stf.spill.nta [loc1]=f37,-256
- adds loc3=96*16-64-128,in0
- ;;
- stf.spill.nta [loc2]=f124,-256
- stf.spill.nta [loc3]=f116,-256
- ;;
- stf.spill.nta [loc2]=f108,-256
- stf.spill.nta [loc3]=f100,-256
- ;;
- stf.spill.nta [loc2]=f92,-256
- stf.spill.nta [loc3]=f84,-256
- ;;
- stf.spill.nta [loc2]=f76,-256
- stf.spill.nta [loc3]=f68,-256
- ;;
- stf.spill.nta [loc2]=f60,-256
- stf.spill.nta [loc3]=f52,-256
- adds loc0=96*16-80,in0
- ;;
- stf.spill.nta [loc2]=f44,-256
- stf.spill.nta [loc3]=f36,-256
- adds loc1=96*16-80-128,in0
- ;;
- stf.spill.nta [loc0]=f123,-256
- stf.spill.nta [loc1]=f115,-256
- ;;
- stf.spill.nta [loc0]=f107,-256
- stf.spill.nta [loc1]=f99,-256
- ;;
- stf.spill.nta [loc0]=f91,-256
- stf.spill.nta [loc1]=f83,-256
- ;;
- stf.spill.nta [loc0]=f75,-256
- stf.spill.nta [loc1]=f67,-256
- ;;
- stf.spill.nta [loc0]=f59,-256
- stf.spill.nta [loc1]=f51,-256
- adds loc2=96*16-96,in0
- ;;
- stf.spill.nta [loc0]=f43,-256
- stf.spill.nta [loc1]=f35,-256
- adds loc3=96*16-96-128,in0
- ;;
- stf.spill.nta [loc2]=f122,-256
- stf.spill.nta [loc3]=f114,-256
- ;;
- stf.spill.nta [loc2]=f106,-256
- stf.spill.nta [loc3]=f98,-256
- ;;
- stf.spill.nta [loc2]=f90,-256
- stf.spill.nta [loc3]=f82,-256
- ;;
- stf.spill.nta [loc2]=f74,-256
- stf.spill.nta [loc3]=f66,-256
- ;;
- stf.spill.nta [loc2]=f58,-256
- stf.spill.nta [loc3]=f50,-256
- adds loc0=96*16-112,in0
- ;;
- stf.spill.nta [loc2]=f42,-256
- stf.spill.nta [loc3]=f34,-256
- adds loc1=96*16-112-128,in0
- ;;
- stf.spill.nta [loc0]=f121,-256
- stf.spill.nta [loc1]=f113,-256
- ;;
- stf.spill.nta [loc0]=f105,-256
- stf.spill.nta [loc1]=f97,-256
- ;;
- stf.spill.nta [loc0]=f89,-256
- stf.spill.nta [loc1]=f81,-256
- ;;
- stf.spill.nta [loc0]=f73,-256
- stf.spill.nta [loc1]=f65,-256
- ;;
- stf.spill.nta [loc0]=f57,-256
- stf.spill.nta [loc1]=f49,-256
- adds loc2=96*16-128,in0
- ;;
- stf.spill.nta [loc0]=f41,-256
- stf.spill.nta [loc1]=f33,-256
- adds loc3=96*16-128-128,in0
- ;;
- stf.spill.nta [loc2]=f120,-256
- stf.spill.nta [loc3]=f112,-256
- ;;
- stf.spill.nta [loc2]=f104,-256
- stf.spill.nta [loc3]=f96,-256
- ;;
- stf.spill.nta [loc2]=f88,-256
- stf.spill.nta [loc3]=f80,-256
- ;;
- stf.spill.nta [loc2]=f72,-256
- stf.spill.nta [loc3]=f64,-256
- ;;
- stf.spill.nta [loc2]=f56,-256
- stf.spill.nta [loc3]=f48,-256
- ;;
- stf.spill.nta [loc2]=f40
- stf.spill.nta [loc3]=f32
- br.ret.sptk.many rp
-END(__ia64_save_fpu)
-
-GLOBAL_ENTRY(__ia64_load_fpu)
- alloc r2=ar.pfs,1,2,0,0
- adds r3=128,in0
- adds r14=256,in0
- adds r15=384,in0
- mov loc0=512
- mov loc1=-1024+16
- ;;
- ldf.fill.nta f32=[in0],loc0
- ldf.fill.nta f40=[ r3],loc0
- ldf.fill.nta f48=[r14],loc0
- ldf.fill.nta f56=[r15],loc0
- ;;
- ldf.fill.nta f64=[in0],loc0
- ldf.fill.nta f72=[ r3],loc0
- ldf.fill.nta f80=[r14],loc0
- ldf.fill.nta f88=[r15],loc0
- ;;
- ldf.fill.nta f96=[in0],loc1
- ldf.fill.nta f104=[ r3],loc1
- ldf.fill.nta f112=[r14],loc1
- ldf.fill.nta f120=[r15],loc1
- ;;
- ldf.fill.nta f33=[in0],loc0
- ldf.fill.nta f41=[ r3],loc0
- ldf.fill.nta f49=[r14],loc0
- ldf.fill.nta f57=[r15],loc0
- ;;
- ldf.fill.nta f65=[in0],loc0
- ldf.fill.nta f73=[ r3],loc0
- ldf.fill.nta f81=[r14],loc0
- ldf.fill.nta f89=[r15],loc0
- ;;
- ldf.fill.nta f97=[in0],loc1
- ldf.fill.nta f105=[ r3],loc1
- ldf.fill.nta f113=[r14],loc1
- ldf.fill.nta f121=[r15],loc1
- ;;
- ldf.fill.nta f34=[in0],loc0
- ldf.fill.nta f42=[ r3],loc0
- ldf.fill.nta f50=[r14],loc0
- ldf.fill.nta f58=[r15],loc0
- ;;
- ldf.fill.nta f66=[in0],loc0
- ldf.fill.nta f74=[ r3],loc0
- ldf.fill.nta f82=[r14],loc0
- ldf.fill.nta f90=[r15],loc0
- ;;
- ldf.fill.nta f98=[in0],loc1
- ldf.fill.nta f106=[ r3],loc1
- ldf.fill.nta f114=[r14],loc1
- ldf.fill.nta f122=[r15],loc1
- ;;
- ldf.fill.nta f35=[in0],loc0
- ldf.fill.nta f43=[ r3],loc0
- ldf.fill.nta f51=[r14],loc0
- ldf.fill.nta f59=[r15],loc0
- ;;
- ldf.fill.nta f67=[in0],loc0
- ldf.fill.nta f75=[ r3],loc0
- ldf.fill.nta f83=[r14],loc0
- ldf.fill.nta f91=[r15],loc0
- ;;
- ldf.fill.nta f99=[in0],loc1
- ldf.fill.nta f107=[ r3],loc1
- ldf.fill.nta f115=[r14],loc1
- ldf.fill.nta f123=[r15],loc1
- ;;
- ldf.fill.nta f36=[in0],loc0
- ldf.fill.nta f44=[ r3],loc0
- ldf.fill.nta f52=[r14],loc0
- ldf.fill.nta f60=[r15],loc0
- ;;
- ldf.fill.nta f68=[in0],loc0
- ldf.fill.nta f76=[ r3],loc0
- ldf.fill.nta f84=[r14],loc0
- ldf.fill.nta f92=[r15],loc0
- ;;
- ldf.fill.nta f100=[in0],loc1
- ldf.fill.nta f108=[ r3],loc1
- ldf.fill.nta f116=[r14],loc1
- ldf.fill.nta f124=[r15],loc1
- ;;
- ldf.fill.nta f37=[in0],loc0
- ldf.fill.nta f45=[ r3],loc0
- ldf.fill.nta f53=[r14],loc0
- ldf.fill.nta f61=[r15],loc0
- ;;
- ldf.fill.nta f69=[in0],loc0
- ldf.fill.nta f77=[ r3],loc0
- ldf.fill.nta f85=[r14],loc0
- ldf.fill.nta f93=[r15],loc0
- ;;
- ldf.fill.nta f101=[in0],loc1
- ldf.fill.nta f109=[ r3],loc1
- ldf.fill.nta f117=[r14],loc1
- ldf.fill.nta f125=[r15],loc1
- ;;
- ldf.fill.nta f38 =[in0],loc0
- ldf.fill.nta f46 =[ r3],loc0
- ldf.fill.nta f54 =[r14],loc0
- ldf.fill.nta f62 =[r15],loc0
- ;;
- ldf.fill.nta f70 =[in0],loc0
- ldf.fill.nta f78 =[ r3],loc0
- ldf.fill.nta f86 =[r14],loc0
- ldf.fill.nta f94 =[r15],loc0
- ;;
- ldf.fill.nta f102=[in0],loc1
- ldf.fill.nta f110=[ r3],loc1
- ldf.fill.nta f118=[r14],loc1
- ldf.fill.nta f126=[r15],loc1
- ;;
- ldf.fill.nta f39 =[in0],loc0
- ldf.fill.nta f47 =[ r3],loc0
- ldf.fill.nta f55 =[r14],loc0
- ldf.fill.nta f63 =[r15],loc0
- ;;
- ldf.fill.nta f71 =[in0],loc0
- ldf.fill.nta f79 =[ r3],loc0
- ldf.fill.nta f87 =[r14],loc0
- ldf.fill.nta f95 =[r15],loc0
- ;;
- ldf.fill.nta f103=[in0]
- ldf.fill.nta f111=[ r3]
- ldf.fill.nta f119=[r14]
- ldf.fill.nta f127=[r15]
- br.ret.sptk.many rp
-END(__ia64_load_fpu)
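For reference on the pattern above: the spill area holds f32-f127 at 16 bytes apiece (96 * 16 = 1536 bytes), with f(n) at offset (n - 32) * 16, and each of the four pointers walks the area with the stride cycle +512, +512, -1024+16. A small standalone C sketch (not kernel code) that reproduces the offsets the in0 pointer visits:

    #include <stdio.h>

    int main(void)
    {
        /* in0 starts at offset 0; r3/r14/r15 start at 128/256/384
         * and follow the same +512, +512, -1024+16 stride cycle. */
        long off = 0;
        const long stride[3] = { 512, 512, -1024 + 16 };
        int i;

        for (i = 0; i < 24; i++) {
            printf("f%ld at offset %ld\n", 32 + off / 16, off);
            off += stride[i % 3];
        }
        return 0;    /* prints f32, f64, f96, f33, f65, f97, ... */
    }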
-
-GLOBAL_ENTRY(__ia64_init_fpu)
- stf.spill [sp]=f0 // M3
- mov f32=f0 // F
- nop.b 0
-
- ldfps f33,f34=[sp] // M0
- ldfps f35,f36=[sp] // M1
- mov f37=f0 // F
- ;;
-
- setf.s f38=r0 // M2
- setf.s f39=r0 // M3
- mov f40=f0 // F
-
- ldfps f41,f42=[sp] // M0
- ldfps f43,f44=[sp] // M1
- mov f45=f0 // F
-
- setf.s f46=r0 // M2
- setf.s f47=r0 // M3
- mov f48=f0 // F
-
- ldfps f49,f50=[sp] // M0
- ldfps f51,f52=[sp] // M1
- mov f53=f0 // F
-
- setf.s f54=r0 // M2
- setf.s f55=r0 // M3
- mov f56=f0 // F
-
- ldfps f57,f58=[sp] // M0
- ldfps f59,f60=[sp] // M1
- mov f61=f0 // F
-
- setf.s f62=r0 // M2
- setf.s f63=r0 // M3
- mov f64=f0 // F
-
- ldfps f65,f66=[sp] // M0
- ldfps f67,f68=[sp] // M1
- mov f69=f0 // F
-
- setf.s f70=r0 // M2
- setf.s f71=r0 // M3
- mov f72=f0 // F
-
- ldfps f73,f74=[sp] // M0
- ldfps f75,f76=[sp] // M1
- mov f77=f0 // F
-
- setf.s f78=r0 // M2
- setf.s f79=r0 // M3
- mov f80=f0 // F
-
- ldfps f81,f82=[sp] // M0
- ldfps f83,f84=[sp] // M1
- mov f85=f0 // F
-
- setf.s f86=r0 // M2
- setf.s f87=r0 // M3
- mov f88=f0 // F
-
- /*
- * When the instructions are cached, it would be faster to initialize
-	 * the remaining registers with simple mov instructions (F-unit).
- * This gets the time down to ~29 cycles. However, this would use up
- * 33 bundles, whereas continuing with the above pattern yields
- * 10 bundles and ~30 cycles.
- */
-
- ldfps f89,f90=[sp] // M0
- ldfps f91,f92=[sp] // M1
- mov f93=f0 // F
-
- setf.s f94=r0 // M2
- setf.s f95=r0 // M3
- mov f96=f0 // F
-
- ldfps f97,f98=[sp] // M0
- ldfps f99,f100=[sp] // M1
- mov f101=f0 // F
-
- setf.s f102=r0 // M2
- setf.s f103=r0 // M3
- mov f104=f0 // F
-
- ldfps f105,f106=[sp] // M0
- ldfps f107,f108=[sp] // M1
- mov f109=f0 // F
-
- setf.s f110=r0 // M2
- setf.s f111=r0 // M3
- mov f112=f0 // F
-
- ldfps f113,f114=[sp] // M0
- ldfps f115,f116=[sp] // M1
- mov f117=f0 // F
-
- setf.s f118=r0 // M2
- setf.s f119=r0 // M3
- mov f120=f0 // F
-
- ldfps f121,f122=[sp] // M0
- ldfps f123,f124=[sp] // M1
- mov f125=f0 // F
-
- setf.s f126=r0 // M2
- setf.s f127=r0 // M3
- br.ret.sptk.many rp // F
-END(__ia64_init_fpu)
-
-/*
- * Switch execution mode from virtual to physical
- *
- * Inputs:
- * r16 = new psr to establish
- * Output:
- * r19 = old virtual address of ar.bsp
- * r20 = old virtual address of sp
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_phys)
- {
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_phys,r15
-
- mov r19=ar.bsp
- mov r20=sp
- mov r14=rp // get return address into a general register
- ;;
-
- // going to physical mode, use tpa to translate virt->phys
- tpa r17=r19
- tpa r3=r3
- tpa sp=sp
- tpa r14=r14
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r17 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_phys)
-
-/*
- * Switch execution mode from physical to virtual
- *
- * Inputs:
- * r16 = new psr to establish
- * r19 = new bspstore to establish
- * r20 = new sp to establish
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_virt)
- {
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_virt,r15
-
- mov r14=rp // get return address into a general register
- ;;
-
- // going to virtual
- // - for code addresses, set upper bits of addr to KERNEL_START
- // - for stack addresses, copy from input argument
- movl r18=KERNEL_START
- dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- mov sp=r20
- ;;
- or r3=r3,r18
- or r14=r14,r18
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r19 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_virt)
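The dep/or pairs above are the entire virtual-address fixup: clear the bits at and above KERNEL_TR_PAGE_SHIFT, then splice in the KERNEL_START upper bits. A minimal C sketch of the same arithmetic (the helper name is illustrative only):

    /* Mirror of the dep/or fixup; tr_page_shift stands in for
     * KERNEL_TR_PAGE_SHIFT. */
    static unsigned long to_mapped_kernel_va(unsigned long addr,
                                             unsigned long kernel_start,
                                             unsigned int tr_page_shift)
    {
        /* dep: clear bits [tr_page_shift, 63] */
        unsigned long offset = addr & ((1UL << tr_page_shift) - 1);

        /* or: splice in the KERNEL_START upper bits */
        return kernel_start | offset;
    }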
-
-GLOBAL_ENTRY(ia64_delay_loop)
- .prologue
-{ nop 0 // work around GAS unwind info generation bug...
- .save ar.lc,r2
- mov r2=ar.lc
- .body
- ;;
- mov ar.lc=r32
-}
- ;;
- // force loop to be 32-byte aligned (GAS bug means we cannot use .align
- // inside function body without corrupting unwind info).
-{ nop 0 }
-1: br.cloop.sptk.few 1b
- ;;
- mov ar.lc=r2
- br.ret.sptk.many rp
-END(ia64_delay_loop)
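Functionally this is just a calibrated busy-wait: ar.lc is loaded with the requested count and br.cloop keeps branching while decrementing it. A rough C equivalent (a sketch; the 32-byte alignment above matters only for cycle accuracy):

    /* br.cloop branches while ar.lc != 0, post-decrementing, so the
     * empty body runs count+1 times. */
    static void delay_loop_sketch(unsigned long count)
    {
        do {
            /* empty loop body */
        } while (count--);
    }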
-
-/*
- * Return a CPU-local timestamp in nanoseconds. This timestamp is
- * NOT synchronized across CPUs; its return value must never be
- * compared against values returned on another CPU. The usage in
- * kernel/sched/core.c ensures that.
- *
- * The return value of sched_clock() is NOT supposed to wrap around.
- * If it did, it would cause some scheduling hiccups (at worst).
- * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
- * that would happen only once every 5+ years.
- *
- * The code below basically calculates:
- *
- * (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
- *
- * except that the multiplication and the shift are done with 128-bit
- * intermediate precision so that we can produce a full 64-bit result.
- */
-GLOBAL_ENTRY(ia64_native_sched_clock)
- addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
- mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
- ;;
- ldf8 f8=[r8]
- ;;
- setf.sig f9=r9 // certain to stall, so issue it _after_ ldf8...
- ;;
- xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc)
- xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product
- ;;
- getf.sig r8=f10 // (5 cyc)
- getf.sig r9=f11
- ;;
- shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
- br.ret.sptk.many rp
-END(ia64_native_sched_clock)
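The xmpy.lu/xmpy.hu pair forms the full 128-bit product in {f11,f10}, and shrp then extracts the 64-bit window starting at the shift. A minimal C sketch of the same computation, assuming a compiler with __int128 support and an illustrative shift value (the real IA64_NSEC_PER_CYC_SHIFT is defined elsewhere in the tree):

    #include <stdint.h>

    #define NSEC_PER_CYC_SHIFT 30   /* assumed value, illustration only */

    /* itc stands in for ar.itc, nsec_per_cyc for the per-CPU factor. */
    static uint64_t sched_clock_sketch(uint64_t itc, uint64_t nsec_per_cyc)
    {
        /* 64x64 -> 128-bit multiply (xmpy.lu/xmpy.hu) ... */
        unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;

        /* ... then take bits [SHIFT, SHIFT+63] (shrp r8=r9,r8,SHIFT) */
        return (uint64_t)(prod >> NSEC_PER_CYC_SHIFT);
    }

cycle_to_nsec() below performs the identical multiply-and-shift on an arbitrary cycle count instead of the live ITC.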
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-GLOBAL_ENTRY(cycle_to_nsec)
- alloc r16=ar.pfs,1,0,0,0
- addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
- ;;
- ldf8 f8=[r8]
- ;;
- setf.sig f9=r32
- ;;
- xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc)
- xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product
- ;;
- getf.sig r8=f10 // (5 cyc)
- getf.sig r9=f11
- ;;
- shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
- br.ret.sptk.many rp
-END(cycle_to_nsec)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-#ifdef CONFIG_IA64_BRL_EMU
-
-/*
- * Assembly routines used by brl_emu.c to set preserved register state.
- */
-
-#define SET_REG(reg) \
- GLOBAL_ENTRY(ia64_set_##reg); \
- alloc r16=ar.pfs,1,0,0,0; \
- mov reg=r32; \
- ;; \
- br.ret.sptk.many rp; \
- END(ia64_set_##reg)
-
-SET_REG(b1);
-SET_REG(b2);
-SET_REG(b3);
-SET_REG(b4);
-SET_REG(b5);
-
-#endif /* CONFIG_IA64_BRL_EMU */
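Seen from C, each generated stub is a one-argument function whose value arrives in r32. A sketch of the prototypes as brl_emu.c would need them (declarations shown for illustration):

    /* Prototypes for the SET_REG-generated stubs; the new
     * branch-register value is the single (r32) argument. */
    extern void ia64_set_b1(unsigned long val);
    extern void ia64_set_b2(unsigned long val);
    extern void ia64_set_b3(unsigned long val);
    extern void ia64_set_b4(unsigned long val);
    extern void ia64_set_b5(unsigned long val);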
-
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_HOTPLUG_CPU
-GLOBAL_ENTRY(ia64_jump_to_sal)
- alloc r16=ar.pfs,1,0,0,0;;
- rsm psr.i | psr.ic
-{
- flushrs
- srlz.i
-}
- tpa r25=in0
- movl r18=tlb_purge_done;;
- DATA_VA_TO_PA(r18);;
- mov b1=r18 // Return location
- movl r18=ia64_do_tlb_purge;;
- DATA_VA_TO_PA(r18);;
- mov b2=r18 // doing tlb_flush work
- mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
- movl r17=1f;;
- DATA_VA_TO_PA(r17);;
- mov cr.iip=r17
- movl r16=SAL_PSR_BITS_TO_SET;;
- mov cr.ipsr=r16
- mov cr.ifs=r0;;
-	rfi;;				// note: this unmasks MCA/INIT (psr.mc)
-1:
- /*
- * Invalidate all TLB data/inst
- */
- br.sptk.many b2;; // jump to tlb purge code
-
-tlb_purge_done:
- RESTORE_REGION_REGS(r25, r17,r18,r19);;
- RESTORE_REG(b0, r25, r17);;
- RESTORE_REG(b1, r25, r17);;
- RESTORE_REG(b2, r25, r17);;
- RESTORE_REG(b3, r25, r17);;
- RESTORE_REG(b4, r25, r17);;
- RESTORE_REG(b5, r25, r17);;
- ld8 r1=[r25],0x08;;
- ld8 r12=[r25],0x08;;
- ld8 r13=[r25],0x08;;
- RESTORE_REG(ar.fpsr, r25, r17);;
- RESTORE_REG(ar.pfs, r25, r17);;
- RESTORE_REG(ar.rnat, r25, r17);;
- RESTORE_REG(ar.unat, r25, r17);;
- RESTORE_REG(ar.bspstore, r25, r17);;
- RESTORE_REG(cr.dcr, r25, r17);;
- RESTORE_REG(cr.iva, r25, r17);;
- RESTORE_REG(cr.pta, r25, r17);;
-	srlz.d;;			// required to avoid violating a RAW dependency
- RESTORE_REG(cr.itv, r25, r17);;
- RESTORE_REG(cr.pmv, r25, r17);;
- RESTORE_REG(cr.cmcv, r25, r17);;
- RESTORE_REG(cr.lrr0, r25, r17);;
- RESTORE_REG(cr.lrr1, r25, r17);;
- ld8 r4=[r25],0x08;;
- ld8 r5=[r25],0x08;;
- ld8 r6=[r25],0x08;;
- ld8 r7=[r25],0x08;;
- ld8 r17=[r25],0x08;;
- mov pr=r17,-1;;
- RESTORE_REG(ar.lc, r25, r17);;
- /*
-	 * Now restore the floating-point regs
- */
- ldf.fill.nta f2=[r25],16;;
- ldf.fill.nta f3=[r25],16;;
- ldf.fill.nta f4=[r25],16;;
- ldf.fill.nta f5=[r25],16;;
- ldf.fill.nta f16=[r25],16;;
- ldf.fill.nta f17=[r25],16;;
- ldf.fill.nta f18=[r25],16;;
- ldf.fill.nta f19=[r25],16;;
- ldf.fill.nta f20=[r25],16;;
- ldf.fill.nta f21=[r25],16;;
- ldf.fill.nta f22=[r25],16;;
- ldf.fill.nta f23=[r25],16;;
- ldf.fill.nta f24=[r25],16;;
- ldf.fill.nta f25=[r25],16;;
- ldf.fill.nta f26=[r25],16;;
- ldf.fill.nta f27=[r25],16;;
- ldf.fill.nta f28=[r25],16;;
- ldf.fill.nta f29=[r25],16;;
- ldf.fill.nta f30=[r25],16;;
- ldf.fill.nta f31=[r25],16;;
-
- /*
-	 * Now that all the register restores are done, we are
-	 * ready for the big DIVE to SAL land
- */
- ssm psr.ic;;
- srlz.d;;
- br.ret.sptk.many b0;;
-END(ia64_jump_to_sal)
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
deleted file mode 100644
index 99300850ab..0000000000
--- a/arch/ia64/kernel/iosapic.c
+++ /dev/null
@@ -1,1137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * I/O SAPIC support.
- *
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
- * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
- *
- * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O
- * APIC code. In particular, we now have separate
- * handlers for edge and level triggered
- * interrupts.
- * 00/10/27 Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
- * allocation PCI to vector mapping, shared PCI
- * interrupts.
- * 00/10/27 D. Mosberger Document things a bit more to make them more
- * understandable. Clean up much of the old
- * IOSAPIC cruft.
- * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts
- * and fixes for ACPI S5(SoftOff) support.
- * 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07 E. Focht <efocht@ess.nec.de> Redirectable interrupt
- * vectors in iosapic_set_affinity(),
- * initializations for /proc/irq/#/smp_affinity
- * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing.
- * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq
- * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to
- * IOSAPIC mapping error
- * 02/07/29 T. Kochi Allocate interrupt vectors dynamically
- * 02/08/04 T. Kochi Cleaned up terminology (irq, global system
- * interrupt, vector, etc.)
- * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's
- * pci_irq code.
- * 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC.
- * Remove iosapic_address & gsi_base from
- * external interfaces. Rationalize
- * __init/__devinit attributes.
- * 04/12/04 Ashok Raj <ashok.raj@intel.com> Intel Corporation 2004
- * Updated to work with irq migration necessary
- * for CPU Hotplug
- */
-/*
- * Here is what the interrupt logic between a PCI device and the kernel looks
- * like:
- *
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
- * INTD). The device is uniquely identified by its bus-, and slot-number
- * (the function number does not matter here because all functions share
- * the same interrupt lines).
- *
- * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
- * controller. Multiple interrupt lines may have to share the same
- * IOSAPIC pin (if they're level triggered and use the same polarity).
- * Each interrupt line has a unique Global System Interrupt (GSI) number
- * which can be calculated as the sum of the controller's base GSI number
- * and the IOSAPIC pin number to which the line connects.
- *
- * (3) The IOSAPIC uses internal routing table entries (RTEs) to map each
- * IOSAPIC pin to an IA-64 interrupt vector. This interrupt vector is then
- * sent to the CPU.
- *
- * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is
- * used as the architecture-independent interrupt handling mechanism in
- * Linux. As an IRQ is a number, we need an
- * IA-64 interrupt vector number <-> IRQ number mapping. On smaller
- * systems, we use a one-to-one mapping between IA-64 vector and IRQ.
- *
- * To sum up, there are three levels of mappings involved:
- *
- * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
- *
- * Note: The term "IRQ" is loosely used everywhere in the Linux kernel
- * to describe interrupts. Here we use "IRQ" only for Linux IRQs. ISA
- * IRQ (isa_irq) is the only exception in this source code.
- */
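A toy C sketch of step (2) of that chain, with made-up names; the only fact taken from the comment is that a line's GSI is the controller's base GSI plus the pin number it connects to:

    /* Toy model of the pin -> GSI step; names are illustrative only. */
    struct toy_iosapic {
        unsigned int gsi_base;   /* controller's base GSI */
        unsigned int num_pins;   /* pins (= RTEs) on this controller */
    };

    static int toy_pin_to_gsi(const struct toy_iosapic *s, unsigned int pin)
    {
        if (pin >= s->num_pins)
            return -1;
        return s->gsi_base + pin;
    }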
-
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/memblock.h>
-
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/xtp.h>
-
-#undef DEBUG_INTERRUPT_ROUTING
-
-#ifdef DEBUG_INTERRUPT_ROUTING
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-static DEFINE_SPINLOCK(iosapic_lock);
-
-/*
- * These tables map IA-64 vectors to the IOSAPIC pin that generates each
- * vector.
- */
-
-#define NO_REF_RTE 0
-
-static struct iosapic {
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* GSI base */
- unsigned short num_rte; /* # of RTEs on this IOSAPIC */
- int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
-#ifdef CONFIG_NUMA
- unsigned short node; /* numa node association via pxm */
-#endif
- spinlock_t lock; /* lock for indirect reg access */
-} iosapic_lists[NR_IOSAPICS];
-
-struct iosapic_rte_info {
- struct list_head rte_list; /* RTEs sharing the same vector */
- char rte_index; /* IOSAPIC RTE index */
- int refcnt; /* reference counter */
- struct iosapic *iosapic;
-} ____cacheline_aligned;
-
-static struct iosapic_intr_info {
- struct list_head rtes; /* RTEs using this vector (empty =>
- * not an IOSAPIC interrupt) */
- int count; /* # of registered RTEs */
- u32 low32; /* current value of low word of
- * Redirection table entry */
- unsigned int dest; /* destination CPU physical ID */
- unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
- unsigned char polarity: 1; /* interrupt polarity
- * (see iosapic.h) */
- unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
-} iosapic_intr_info[NR_IRQS];
-
-static unsigned char pcat_compat; /* 8259 compatibility flag */
-
-static inline void
-iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic->lock, flags);
- __iosapic_write(iosapic->addr, reg, val);
- spin_unlock_irqrestore(&iosapic->lock, flags);
-}
-
-/*
- * Find an IOSAPIC associated with a GSI
- */
-static inline int
-find_iosapic (unsigned int gsi)
-{
- int i;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
- iosapic_lists[i].num_rte)
- return i;
- }
-
- return -1;
-}
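The unsigned cast in the comparison is the classic single-compare range check: when gsi < gsi_base the subtraction wraps around to a huge value and fails the < num_rte test, so one comparison enforces both bounds. A standalone sketch:

    #include <assert.h>

    /* Equivalent to (gsi >= base && gsi < base + n) in one compare. */
    static int gsi_in_range(unsigned int gsi, unsigned int base,
                            unsigned int n)
    {
        return (gsi - base) < n;
    }

    int main(void)
    {
        assert(gsi_in_range(5, 4, 2));   /* 4 <= 5 < 6 */
        assert(!gsi_in_range(3, 4, 2));  /* 3 - 4 wraps to UINT_MAX */
        return 0;
    }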
-
-static inline int __gsi_to_irq(unsigned int gsi)
-{
- int irq;
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
-
- for (irq = 0; irq < NR_IRQS; irq++) {
- info = &iosapic_intr_info[irq];
- list_for_each_entry(rte, &info->rtes, rte_list)
- if (rte->iosapic->gsi_base + rte->rte_index == gsi)
- return irq;
- }
- return -1;
-}
-
-int
-gsi_to_irq (unsigned int gsi)
-{
- unsigned long flags;
- int irq;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- irq = __gsi_to_irq(gsi);
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return irq;
-}
-
-static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
-{
- struct iosapic_rte_info *rte;
-
- list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
- if (rte->iosapic->gsi_base + rte->rte_index == gsi)
- return rte;
- return NULL;
-}
-
-static void
-set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
-{
- unsigned long pol, trigger, dmode;
- u32 low32, high32;
- int rte_index;
- char redir;
- struct iosapic_rte_info *rte;
- ia64_vector vector = irq_to_vector(irq);
-
- DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
-
- rte = find_rte(irq, gsi);
- if (!rte)
- return; /* not an IOSAPIC interrupt */
-
- rte_index = rte->rte_index;
- pol = iosapic_intr_info[irq].polarity;
- trigger = iosapic_intr_info[irq].trigger;
- dmode = iosapic_intr_info[irq].dmode;
-
- redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
-
-#ifdef CONFIG_SMP
- set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
-#endif
-
- low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
- (trigger << IOSAPIC_TRIGGER_SHIFT) |
- (dmode << IOSAPIC_DELIVERY_SHIFT) |
- ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
- vector);
-
- /* dest contains both id and eid */
- high32 = (dest << IOSAPIC_DEST_SHIFT);
-
- iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
- iosapic_intr_info[irq].low32 = low32;
- iosapic_intr_info[irq].dest = dest;
-}
-
-static void
-iosapic_nop (struct irq_data *data)
-{
- /* do nothing... */
-}
-
-
-#ifdef CONFIG_KEXEC
-void
-kexec_disable_iosapic(void)
-{
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
- ia64_vector vec;
- int irq;
-
- for (irq = 0; irq < NR_IRQS; irq++) {
- info = &iosapic_intr_info[irq];
- vec = irq_to_vector(irq);
- list_for_each_entry(rte, &info->rtes,
- rte_list) {
- iosapic_write(rte->iosapic,
- IOSAPIC_RTE_LOW(rte->rte_index),
- IOSAPIC_MASK|vec);
- iosapic_eoi(rte->iosapic->addr, vec);
- }
- }
-}
-#endif
-
-static void
-mask_irq (struct irq_data *data)
-{
- unsigned int irq = data->irq;
- u32 low32;
- int rte_index;
- struct iosapic_rte_info *rte;
-
- if (!iosapic_intr_info[irq].count)
- return; /* not an IOSAPIC interrupt! */
-
- /* set only the mask bit */
- low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
- rte_index = rte->rte_index;
- iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
- }
-}
-
-static void
-unmask_irq (struct irq_data *data)
-{
- unsigned int irq = data->irq;
- u32 low32;
- int rte_index;
- struct iosapic_rte_info *rte;
-
- if (!iosapic_intr_info[irq].count)
- return; /* not an IOSAPIC interrupt! */
-
- low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
- rte_index = rte->rte_index;
- iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
- }
-}
-
-
-static int
-iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
-#ifdef CONFIG_SMP
- unsigned int irq = data->irq;
- u32 high32, low32;
- int cpu, dest, rte_index;
- int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
- struct iosapic_rte_info *rte;
- struct iosapic *iosapic;
-
- irq &= (~IA64_IRQ_REDIRECTED);
-
- cpu = cpumask_first_and(cpu_online_mask, mask);
- if (cpu >= nr_cpu_ids)
- return -1;
-
- if (irq_prepare_move(irq, cpu))
- return -1;
-
- dest = cpu_physical_id(cpu);
-
- if (!iosapic_intr_info[irq].count)
- return -1; /* not an IOSAPIC interrupt */
-
- set_irq_affinity_info(irq, dest, redir);
-
- /* dest contains both id and eid */
- high32 = dest << IOSAPIC_DEST_SHIFT;
-
- low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
- if (redir)
- /* change delivery mode to lowest priority */
- low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
- else
- /* change delivery mode to fixed */
- low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
- low32 &= IOSAPIC_VECTOR_MASK;
- low32 |= irq_to_vector(irq);
-
- iosapic_intr_info[irq].low32 = low32;
- iosapic_intr_info[irq].dest = dest;
- list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
- iosapic = rte->iosapic;
- rte_index = rte->rte_index;
- iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
- }
-
-#endif
- return 0;
-}
-
-/*
- * Handlers for level-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_level_irq (struct irq_data *data)
-{
- unmask_irq(data);
- return 0;
-}
-
-static void
-iosapic_unmask_level_irq (struct irq_data *data)
-{
- unsigned int irq = data->irq;
- ia64_vector vec = irq_to_vector(irq);
- struct iosapic_rte_info *rte;
- int do_unmask_irq = 0;
-
- irq_complete_move(irq);
- if (unlikely(irqd_is_setaffinity_pending(data))) {
- do_unmask_irq = 1;
- mask_irq(data);
- } else
- unmask_irq(data);
-
- list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
- iosapic_eoi(rte->iosapic->addr, vec);
-
- if (unlikely(do_unmask_irq)) {
- irq_move_masked_irq(data);
- unmask_irq(data);
- }
-}
-
-#define iosapic_shutdown_level_irq mask_irq
-#define iosapic_enable_level_irq unmask_irq
-#define iosapic_disable_level_irq mask_irq
-#define iosapic_ack_level_irq iosapic_nop
-
-static struct irq_chip irq_type_iosapic_level = {
- .name = "IO-SAPIC-level",
- .irq_startup = iosapic_startup_level_irq,
- .irq_shutdown = iosapic_shutdown_level_irq,
- .irq_enable = iosapic_enable_level_irq,
- .irq_disable = iosapic_disable_level_irq,
- .irq_ack = iosapic_ack_level_irq,
- .irq_mask = mask_irq,
- .irq_unmask = iosapic_unmask_level_irq,
- .irq_set_affinity = iosapic_set_affinity
-};
-
-/*
- * Handlers for edge-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_edge_irq (struct irq_data *data)
-{
- unmask_irq(data);
- /*
-	 * The IOSAPIC simply drops interrupts that were pended while the
-	 * corresponding pin was masked, so we can't know whether an
-	 * interrupt is already pending.  Let's hope not...
- */
- return 0;
-}
-
-static void
-iosapic_ack_edge_irq (struct irq_data *data)
-{
- irq_complete_move(data->irq);
- irq_move_irq(data);
-}
-
-#define iosapic_enable_edge_irq unmask_irq
-#define iosapic_disable_edge_irq iosapic_nop
-
-static struct irq_chip irq_type_iosapic_edge = {
- .name = "IO-SAPIC-edge",
- .irq_startup = iosapic_startup_edge_irq,
- .irq_shutdown = iosapic_disable_edge_irq,
- .irq_enable = iosapic_enable_edge_irq,
- .irq_disable = iosapic_disable_edge_irq,
- .irq_ack = iosapic_ack_edge_irq,
- .irq_mask = mask_irq,
- .irq_unmask = unmask_irq,
- .irq_set_affinity = iosapic_set_affinity
-};
-
-static unsigned int
-iosapic_version (char __iomem *addr)
-{
- /*
-	 * The IOSAPIC Version Register returns a 32-bit structure like:
- * {
- * unsigned int version : 8;
- * unsigned int reserved1 : 8;
- * unsigned int max_redir : 8;
- * unsigned int reserved2 : 8;
- * }
- */
- return __iosapic_read(addr, IOSAPIC_VERSION);
-}
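Given that layout, the interesting fields fall out with a shift and a mask; a sketch of the extraction (the rest of this file only ever uses max_redir, as (ver >> 16) & 0xff):

    /* Field extraction per the register layout described above. */
    static inline unsigned int iosapic_ver_version(unsigned int ver)
    {
        return ver & 0xff;            /* bits 0-7: version */
    }

    static inline unsigned int iosapic_ver_max_redir(unsigned int ver)
    {
        return (ver >> 16) & 0xff;    /* bits 16-23: max_redir */
    }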
-
-static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
-{
- int i, irq = -ENOSPC, min_count = -1;
- struct iosapic_intr_info *info;
-
- /*
- * shared vectors for edge-triggered interrupts are not
- * supported yet
- */
- if (trigger == IOSAPIC_EDGE)
- return -EINVAL;
-
- for (i = 0; i < NR_IRQS; i++) {
- info = &iosapic_intr_info[i];
- if (info->trigger == trigger && info->polarity == pol &&
- (info->dmode == IOSAPIC_FIXED ||
- info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
- can_request_irq(i, IRQF_SHARED)) {
- if (min_count == -1 || info->count < min_count) {
- irq = i;
- min_count = info->count;
- }
- }
- }
- return irq;
-}
-
-/*
- * If the given vector is already owned by another interrupt,
- * assign that interrupt a new vector and make the old one available.
- */
-static void __init
-iosapic_reassign_vector (int irq)
-{
- int new_irq;
-
- if (iosapic_intr_info[irq].count) {
- new_irq = create_irq();
- if (new_irq < 0)
- panic("%s: out of interrupt vectors!\n", __func__);
- printk(KERN_INFO "Reassigning vector %d to %d\n",
- irq_to_vector(irq), irq_to_vector(new_irq));
- memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
- sizeof(struct iosapic_intr_info));
- INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
- list_move(iosapic_intr_info[irq].rtes.next,
- &iosapic_intr_info[new_irq].rtes);
- memset(&iosapic_intr_info[irq], 0,
- sizeof(struct iosapic_intr_info));
- iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
- }
-}
-
-static inline int irq_is_shared (int irq)
-{
- return (iosapic_intr_info[irq].count > 1);
-}
-
-struct irq_chip*
-ia64_native_iosapic_get_irq_chip(unsigned long trigger)
-{
- if (trigger == IOSAPIC_EDGE)
- return &irq_type_iosapic_edge;
- else
- return &irq_type_iosapic_level;
-}
-
-static int
-register_intr (unsigned int gsi, int irq, unsigned char delivery,
- unsigned long polarity, unsigned long trigger)
-{
- struct irq_chip *chip, *irq_type;
- int index;
- struct iosapic_rte_info *rte;
-
- index = find_iosapic(gsi);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __func__, gsi);
- return -ENODEV;
- }
-
- rte = find_rte(irq, gsi);
- if (!rte) {
- rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
- if (!rte) {
- printk(KERN_WARNING "%s: cannot allocate memory\n",
- __func__);
- return -ENOMEM;
- }
-
- rte->iosapic = &iosapic_lists[index];
- rte->rte_index = gsi - rte->iosapic->gsi_base;
- rte->refcnt++;
- list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
- iosapic_intr_info[irq].count++;
- iosapic_lists[index].rtes_inuse++;
- }
- else if (rte->refcnt == NO_REF_RTE) {
- struct iosapic_intr_info *info = &iosapic_intr_info[irq];
- if (info->count > 0 &&
- (info->trigger != trigger || info->polarity != polarity)){
- printk (KERN_WARNING
- "%s: cannot override the interrupt\n",
- __func__);
- return -EINVAL;
- }
- rte->refcnt++;
- iosapic_intr_info[irq].count++;
- iosapic_lists[index].rtes_inuse++;
- }
-
- iosapic_intr_info[irq].polarity = polarity;
- iosapic_intr_info[irq].dmode = delivery;
- iosapic_intr_info[irq].trigger = trigger;
-
- irq_type = iosapic_get_irq_chip(trigger);
-
- chip = irq_get_chip(irq);
- if (irq_type != NULL && chip != irq_type) {
- if (chip != &no_irq_chip)
- printk(KERN_WARNING
- "%s: changing vector %d from %s to %s\n",
- __func__, irq_to_vector(irq),
- chip->name, irq_type->name);
- chip = irq_type;
- }
- irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip,
- trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq,
- NULL);
- return 0;
-}
-
-static unsigned int
-get_target_cpu (unsigned int gsi, int irq)
-{
-#ifdef CONFIG_SMP
- static int cpu = -1;
- extern int cpe_vector;
- cpumask_t domain = irq_to_domain(irq);
-
- /*
-	 * In the case of a vector shared by multiple RTEs, all RTEs that
-	 * share the vector must use the same destination CPU.
- */
- if (iosapic_intr_info[irq].count)
- return iosapic_intr_info[irq].dest;
-
- /*
- * If the platform supports redirection via XTP, let it
- * distribute interrupts.
- */
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- return cpu_physical_id(smp_processor_id());
-
- /*
- * Some interrupts (ACPI SCI, for instance) are registered
- * before the BSP is marked as online.
- */
- if (!cpu_online(smp_processor_id()))
- return cpu_physical_id(smp_processor_id());
-
- if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
- return get_cpei_target_cpu();
-
-#ifdef CONFIG_NUMA
- {
- int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
- const struct cpumask *cpu_mask;
-
- iosapic_index = find_iosapic(gsi);
- if (iosapic_index < 0 ||
- iosapic_lists[iosapic_index].node == MAX_NUMNODES)
- goto skip_numa_setup;
-
- cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
- num_cpus = 0;
- for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
- if (cpu_online(numa_cpu))
- num_cpus++;
- }
-
- if (!num_cpus)
- goto skip_numa_setup;
-
- /* Use irq assignment to distribute across cpus in node */
- cpu_index = irq % num_cpus;
-
- for_each_cpu_and(numa_cpu, cpu_mask, &domain)
- if (cpu_online(numa_cpu) && i++ >= cpu_index)
- break;
-
- if (numa_cpu < nr_cpu_ids)
- return cpu_physical_id(numa_cpu);
- }
-skip_numa_setup:
-#endif
- /*
- * Otherwise, round-robin interrupt vectors across all the
- * processors. (It'd be nice if we could be smarter in the
- * case of NUMA.)
- */
- do {
- if (++cpu >= nr_cpu_ids)
- cpu = 0;
- } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
-
- return cpu_physical_id(cpu);
-#else /* CONFIG_SMP */
- return cpu_physical_id(smp_processor_id());
-#endif
-}
-
-static inline unsigned char choose_dmode(void)
-{
-#ifdef CONFIG_SMP
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- return IOSAPIC_LOWEST_PRIORITY;
-#endif
- return IOSAPIC_FIXED;
-}
-
-/*
- * ACPI can describe IOSAPIC interrupts via static tables and namespace
- * methods. This provides an interface to register those interrupts and
- * program the IOSAPIC RTE.
- */
-int
-iosapic_register_intr (unsigned int gsi,
- unsigned long polarity, unsigned long trigger)
-{
- int irq, mask = 1, err;
- unsigned int dest;
- unsigned long flags;
- struct iosapic_rte_info *rte;
- u32 low32;
- unsigned char dmode;
- struct irq_desc *desc;
-
- /*
- * If this GSI has already been registered (i.e., it's a
- * shared interrupt, or we lost a race to register it),
- * don't touch the RTE.
- */
- spin_lock_irqsave(&iosapic_lock, flags);
- irq = __gsi_to_irq(gsi);
- if (irq > 0) {
- rte = find_rte(irq, gsi);
- if(iosapic_intr_info[irq].count == 0) {
- assign_irq_vector(irq);
- irq_init_desc(irq);
- } else if (rte->refcnt != NO_REF_RTE) {
- rte->refcnt++;
- goto unlock_iosapic_lock;
- }
- } else
- irq = create_irq();
-
-	/* If vectors are running out, try to find a sharable vector */
- if (irq < 0) {
- irq = iosapic_find_sharable_irq(trigger, polarity);
- if (irq < 0)
- goto unlock_iosapic_lock;
- }
-
- desc = irq_to_desc(irq);
- raw_spin_lock(&desc->lock);
- dest = get_target_cpu(gsi, irq);
- dmode = choose_dmode();
- err = register_intr(gsi, irq, dmode, polarity, trigger);
- if (err < 0) {
- raw_spin_unlock(&desc->lock);
- irq = err;
- goto unlock_iosapic_lock;
- }
-
- /*
- * If the vector is shared and already unmasked for other
- * interrupt sources, don't mask it.
- */
- low32 = iosapic_intr_info[irq].low32;
- if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
- mask = 0;
- set_rte(gsi, irq, dest, mask);
-
- printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, irq_to_vector(irq));
-
- raw_spin_unlock(&desc->lock);
- unlock_iosapic_lock:
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return irq;
-}
-
-void
-iosapic_unregister_intr (unsigned int gsi)
-{
- unsigned long flags;
- int irq, index;
- u32 low32;
- unsigned long trigger, polarity;
- unsigned int dest;
- struct iosapic_rte_info *rte;
-
- /*
-	 * If no irq is associated with the gsi, this call to
-	 * iosapic_unregister_intr() is unbalanced. We need to check
-	 * this again after taking the lock.
- */
- irq = gsi_to_irq(gsi);
- if (irq < 0) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
- gsi);
- WARN_ON(1);
- return;
- }
-
- spin_lock_irqsave(&iosapic_lock, flags);
- if ((rte = find_rte(irq, gsi)) == NULL) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
- gsi);
- WARN_ON(1);
- goto out;
- }
-
- if (--rte->refcnt > 0)
- goto out;
-
- rte->refcnt = NO_REF_RTE;
-
- /* Mask the interrupt */
- low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
- iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
-
- iosapic_intr_info[irq].count--;
- index = find_iosapic(gsi);
- iosapic_lists[index].rtes_inuse--;
- WARN_ON(iosapic_lists[index].rtes_inuse < 0);
-
- trigger = iosapic_intr_info[irq].trigger;
- polarity = iosapic_intr_info[irq].polarity;
- dest = iosapic_intr_info[irq].dest;
- printk(KERN_INFO
- "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, irq_to_vector(irq));
-
- if (iosapic_intr_info[irq].count == 0) {
-#ifdef CONFIG_SMP
- /* Clear affinity */
- irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask);
-#endif
- /* Clear the interrupt information */
- iosapic_intr_info[irq].dest = 0;
- iosapic_intr_info[irq].dmode = 0;
- iosapic_intr_info[irq].polarity = 0;
- iosapic_intr_info[irq].trigger = 0;
- iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
-
- /* Destroy and reserve IRQ */
- destroy_and_reserve_irq(irq);
- }
- out:
- spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-/*
- * ACPI calls this when it finds an entry for a platform interrupt.
- */
-int __init
-iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
- int iosapic_vector, u16 eid, u16 id,
- unsigned long polarity, unsigned long trigger)
-{
- static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
- unsigned char delivery;
- int irq, vector, mask = 0;
- unsigned int dest = ((id << 8) | eid) & 0xffff;
-
- switch (int_type) {
- case ACPI_INTERRUPT_PMI:
- irq = vector = iosapic_vector;
- bind_irq_vector(irq, vector, CPU_MASK_ALL);
- /*
-		 * Since the PMI vector is allocated by FW (ACPI), not by the
-		 * kernel, we need to make sure the vector is available.
- */
- iosapic_reassign_vector(irq);
- delivery = IOSAPIC_PMI;
- break;
- case ACPI_INTERRUPT_INIT:
- irq = create_irq();
- if (irq < 0)
- panic("%s: out of interrupt vectors!\n", __func__);
- vector = irq_to_vector(irq);
- delivery = IOSAPIC_INIT;
- break;
- case ACPI_INTERRUPT_CPEI:
- irq = vector = IA64_CPE_VECTOR;
- BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
- delivery = IOSAPIC_FIXED;
- mask = 1;
- break;
- default:
- printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
- int_type);
- return -1;
- }
-
- register_intr(gsi, irq, delivery, polarity, trigger);
-
- printk(KERN_INFO
- "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
- " vector %d\n",
- int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
- int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, irq, dest, mask);
- return vector;
-}
-
-/*
- * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
- */
-void iosapic_override_isa_irq(unsigned int isa_irq, unsigned int gsi,
- unsigned long polarity, unsigned long trigger)
-{
- int vector, irq;
- unsigned int dest = cpu_physical_id(smp_processor_id());
- unsigned char dmode;
-
- irq = vector = isa_irq_to_vector(isa_irq);
- BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
- dmode = choose_dmode();
- register_intr(gsi, irq, dmode, polarity, trigger);
-
- DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
- isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
- polarity == IOSAPIC_POL_HIGH ? "high" : "low",
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, irq, dest, 1);
-}
-
-void __init
-ia64_native_iosapic_pcat_compat_init(void)
-{
- if (pcat_compat) {
- /*
-		 * Disable the compatibility-mode interrupts (8259 style);
-		 * this needs IN/OUT support enabled.
- */
- printk(KERN_INFO
- "%s: Disabling PC-AT compatible 8259 interrupts\n",
- __func__);
- outb(0xff, 0xA1);
- outb(0xff, 0x21);
- }
-}
-
-void __init
-iosapic_system_init (int system_pcat_compat)
-{
- int irq;
-
- for (irq = 0; irq < NR_IRQS; ++irq) {
- iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
- /* mark as unused */
- INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
-
- iosapic_intr_info[irq].count = 0;
- }
-
- pcat_compat = system_pcat_compat;
- if (pcat_compat)
- iosapic_pcat_compat_init();
-}
-
-static inline int
-iosapic_alloc (void)
-{
- int index;
-
- for (index = 0; index < NR_IOSAPICS; index++)
- if (!iosapic_lists[index].addr)
- return index;
-
- printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
- return -1;
-}
-
-static inline void
-iosapic_free (int index)
-{
- memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
-}
-
-static inline int
-iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
-{
- int index;
- unsigned int gsi_end, base, end;
-
- /* check gsi range */
- gsi_end = gsi_base + ((ver >> 16) & 0xff);
- for (index = 0; index < NR_IOSAPICS; index++) {
- if (!iosapic_lists[index].addr)
- continue;
-
- base = iosapic_lists[index].gsi_base;
- end = base + iosapic_lists[index].num_rte - 1;
-
- if (gsi_end < base || end < gsi_base)
- continue; /* OK */
-
- return -EBUSY;
- }
- return 0;
-}
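The continue condition above is the usual closed-interval disjointness test: [gsi_base, gsi_end] and [base, end] overlap unless one ends before the other begins. The same predicate in isolation:

    /* Closed intervals [a0,a1] and [b0,b1] overlap iff neither one
     * ends before the other starts. */
    static int ranges_overlap(unsigned int a0, unsigned int a1,
                              unsigned int b0, unsigned int b1)
    {
        return !(a1 < b0 || b1 < a0);
    }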
-
-static int
-iosapic_delete_rte(unsigned int irq, unsigned int gsi)
-{
- struct iosapic_rte_info *rte, *temp;
-
- list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
- rte_list) {
- if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
- if (rte->refcnt)
- return -EBUSY;
-
- list_del(&rte->rte_list);
- kfree(rte);
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
-{
- int num_rte, err, index;
- unsigned int isa_irq, ver;
- char __iomem *addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- index = find_iosapic(gsi_base);
- if (index >= 0) {
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return -EBUSY;
- }
-
- addr = ioremap(phys_addr, 0);
- if (addr == NULL) {
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return -ENOMEM;
- }
- ver = iosapic_version(addr);
- if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
- iounmap(addr);
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
- }
-
- /*
- * The MAX_REDIR register holds the highest input pin number
-	 * (starting from 0). We add 1 so that we can use it as the
-	 * number of pins (= RTEs).
- */
- num_rte = ((ver >> 16) & 0xff) + 1;
-
- index = iosapic_alloc();
- iosapic_lists[index].addr = addr;
- iosapic_lists[index].gsi_base = gsi_base;
- iosapic_lists[index].num_rte = num_rte;
-#ifdef CONFIG_NUMA
- iosapic_lists[index].node = MAX_NUMNODES;
-#endif
- spin_lock_init(&iosapic_lists[index].lock);
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- if ((gsi_base == 0) && pcat_compat) {
- /*
- * Map the legacy ISA devices into the IOSAPIC data. Some of
- * these may get reprogrammed later on with data from the ACPI
- * Interrupt Source Override table.
- */
- for (isa_irq = 0; isa_irq < 16; ++isa_irq)
- iosapic_override_isa_irq(isa_irq, isa_irq,
- IOSAPIC_POL_HIGH,
- IOSAPIC_EDGE);
- }
- return 0;
-}
-
-int iosapic_remove(unsigned int gsi_base)
-{
- int i, irq, index, err = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
- __func__, gsi_base);
- goto out;
- }
-
- if (iosapic_lists[index].rtes_inuse) {
- err = -EBUSY;
- printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
- __func__, gsi_base);
- goto out;
- }
-
- for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
- irq = __gsi_to_irq(i);
- if (irq < 0)
- continue;
-
- err = iosapic_delete_rte(irq, i);
- if (err)
- goto out;
- }
-
- iounmap(iosapic_lists[index].addr);
- iosapic_free(index);
- out:
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
-}
-
-#ifdef CONFIG_NUMA
-void map_iosapic_to_node(unsigned int gsi_base, int node)
-{
- int index;
-
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __func__, gsi_base);
- return;
- }
- iosapic_lists[index].node = node;
- return;
-}
-#endif
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
deleted file mode 100644
index 275b9ea58c..0000000000
--- a/arch/ia64/kernel/irq.c
+++ /dev/null
@@ -1,181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/ia64/kernel/irq.c
- *
- * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQs should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- *
- * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
- *
- * 4/14/2004: Added code to handle cpu migration and do safe irq
- * migration without losing interrupts for iosapic
- * architecture.
- */
-
-#include <asm/delay.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include <asm/mca.h>
-#include <asm/xtp.h>
-
-/*
- * 'What should we do if we get a hw irq event on an illegal vector?'
- * Each architecture has to answer this itself.
- */
-void ack_bad_irq(unsigned int irq)
-{
- printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
-}
-
-/*
- * Interrupt statistics:
- */
-
-atomic_t irq_err_count;
-
-/*
- * /proc/interrupts printing:
- */
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- return 0;
-}
-
-#ifdef CONFIG_SMP
-static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
-
-void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
-{
- if (irq < NR_IRQS) {
- irq_data_update_affinity(irq_get_irq_data(irq),
- cpumask_of(cpu_logical_id(hwid)));
- irq_redir[irq] = (char) (redir & 0xff);
- }
-}
-#endif /* CONFIG_SMP */
-
-int __init arch_early_irq_init(void)
-{
- ia64_mca_irq_init();
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-unsigned int vectors_in_migration[NR_IRQS];
-
-/*
- * Since cpu_online_mask is already updated, we just need to check
- * for IRQs whose affinity no longer contains any online CPU.
- */
-static void migrate_irqs(void)
-{
- int irq, new_cpu;
-
- for (irq=0; irq < NR_IRQS; irq++) {
- struct irq_desc *desc = irq_to_desc(irq);
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
-
- if (irqd_irq_disabled(data))
- continue;
-
- /*
- * No handling for now.
-		 * TBD: Implement a disable function so we can
-		 * tell the CPU not to respond to these local interrupt
-		 * sources, such as ITV, CPEI, MCA, etc.
- */
- if (irqd_is_per_cpu(data))
- continue;
-
- if (cpumask_any_and(irq_data_get_affinity_mask(data),
- cpu_online_mask) >= nr_cpu_ids) {
- /*
- * Save it for phase 2 processing
- */
- vectors_in_migration[irq] = irq;
-
- new_cpu = cpumask_any(cpu_online_mask);
-
- /*
-			 * All three are essential; currently WARN_ON, but maybe panic?
- */
- if (chip && chip->irq_disable &&
- chip->irq_enable && chip->irq_set_affinity) {
- chip->irq_disable(data);
- chip->irq_set_affinity(data,
- cpumask_of(new_cpu), false);
- chip->irq_enable(data);
- } else {
- WARN_ON((!chip || !chip->irq_disable ||
- !chip->irq_enable ||
- !chip->irq_set_affinity));
- }
- }
- }
-}
-
-void fixup_irqs(void)
-{
- unsigned int irq;
- extern void ia64_process_pending_intr(void);
- extern volatile int time_keeper_id;
-
- /* Mask ITV to disable timer */
- ia64_set_itv(1 << 16);
-
- /*
- * Find a new timesync master
- */
- if (smp_processor_id() == time_keeper_id) {
- time_keeper_id = cpumask_first(cpu_online_mask);
- printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
- }
-
- /*
- * Phase 1: Locate IRQs bound to this cpu and
- * relocate them for cpu removal.
- */
- migrate_irqs();
-
- /*
- * Phase 2: Perform interrupt processing for all entries reported in
- * local APIC.
- */
- ia64_process_pending_intr();
-
- /*
-	 * Phase 3: Now handle any interrupts not captured in the local APIC.
-	 * This accounts for cases where a device interrupted while its
-	 * RTE was being disabled and re-programmed.
- */
- for (irq=0; irq < NR_IRQS; irq++) {
- if (vectors_in_migration[irq]) {
- struct pt_regs *old_regs = set_irq_regs(NULL);
-
- vectors_in_migration[irq]=0;
- generic_handle_irq(irq);
- set_irq_regs(old_regs);
- }
- }
-
- /*
-	 * Now let the processor die. We disable interrupts and use
-	 * max_xtp() to ensure no more interrupts are routed to this
-	 * processor. The local timer interrupt can still have one
-	 * pending, which we take care of in timer_interrupt().
- */
- max_xtp();
- local_irq_disable();
-}
-#endif
diff --git a/arch/ia64/kernel/irq.h b/arch/ia64/kernel/irq.h
deleted file mode 100644
index 4d16f3cbeb..0000000000
--- a/arch/ia64/kernel/irq.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-extern void register_percpu_irq(ia64_vector vec, irq_handler_t handler,
- unsigned long flags, const char *name);
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
deleted file mode 100644
index 46e33c5cb5..0000000000
--- a/arch/ia64/kernel/irq_ia64.c
+++ /dev/null
@@ -1,645 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/ia64/kernel/irq_ia64.c
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 6/10/99: Updated to bring in sync with x86 version to facilitate
- * support for SMP and different interrupt controllers.
- *
- * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
- * PCI to vector allocation routine.
- * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
- * Added CPU Hotplug handling for IPF.
- */
-
-#include <linux/module.h>
-#include <linux/pgtable.h>
-
-#include <linux/jiffies.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel_stat.h>
-#include <linux/ptrace.h>
-#include <linux/signal.h>
-#include <linux/smp.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/ratelimit.h>
-#include <linux/acpi.h>
-#include <linux/sched.h>
-
-#include <asm/delay.h>
-#include <asm/intrinsics.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/tlbflush.h>
-
-#define IRQ_DEBUG 0
-
-#define IRQ_VECTOR_UNASSIGNED (0)
-
-#define IRQ_UNUSED (0)
-#define IRQ_USED (1)
-#define IRQ_RSVD (2)
-
-int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
-int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
-
-/* default base addr of IPI table */
-void __iomem *ipi_base_addr = ((void __iomem *)
- (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
-
-static cpumask_t vector_allocation_domain(int cpu);
-
-/*
- * Legacy IRQ to IA-64 vector translation table.
- */
-__u8 isa_irq_to_vector_map[16] = {
- /* 8259 IRQ translation, first 16 entries */
- 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
- 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
-};
-EXPORT_SYMBOL(isa_irq_to_vector_map);
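The table is indexed directly by the legacy IRQ number, so e.g. ISA IRQ 0 (the PIT) maps to vector 0x2f and IRQ 1 (the keyboard) to 0x20. A sketch of the lookup (isa_irq_to_vector() in the headers is, to my reading, essentially this):

    /* Illustrative lookup helper; masking to 4 bits keeps the index
     * within the 16-entry table. */
    static unsigned char toy_isa_irq_to_vector(unsigned int isa_irq)
    {
        return isa_irq_to_vector_map[isa_irq & 15];
    }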
-
-DEFINE_SPINLOCK(vector_lock);
-
-struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
- [0 ... NR_IRQS - 1] = {
- .vector = IRQ_VECTOR_UNASSIGNED,
- .domain = CPU_MASK_NONE
- }
-};
-
-DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
- [0 ... IA64_NUM_VECTORS - 1] = -1
-};
-
-static cpumask_t vector_table[IA64_NUM_VECTORS] = {
- [0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
-};
-
-static int irq_status[NR_IRQS] = {
- [0 ... NR_IRQS -1] = IRQ_UNUSED
-};
-
-static inline int find_unassigned_irq(void)
-{
- int irq;
-
- for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
- if (irq_status[irq] == IRQ_UNUSED)
- return irq;
- return -ENOSPC;
-}
-
-static inline int find_unassigned_vector(cpumask_t domain)
-{
- cpumask_t mask;
- int pos, vector;
-
- cpumask_and(&mask, &domain, cpu_online_mask);
- if (cpumask_empty(&mask))
- return -EINVAL;
-
- for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
- vector = IA64_FIRST_DEVICE_VECTOR + pos;
- cpumask_and(&mask, &domain, &vector_table[vector]);
- if (!cpumask_empty(&mask))
- continue;
- return vector;
- }
- return -ENOSPC;
-}
-
-static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
-{
- cpumask_t mask;
- int cpu;
- struct irq_cfg *cfg = &irq_cfg[irq];
-
- BUG_ON((unsigned)irq >= NR_IRQS);
- BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
-
- cpumask_and(&mask, &domain, cpu_online_mask);
- if (cpumask_empty(&mask))
- return -EINVAL;
- if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
- return 0;
- if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
- return -EBUSY;
- for_each_cpu(cpu, &mask)
- per_cpu(vector_irq, cpu)[vector] = irq;
- cfg->vector = vector;
- cfg->domain = domain;
- irq_status[irq] = IRQ_USED;
- cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
- return 0;
-}
-
-int bind_irq_vector(int irq, int vector, cpumask_t domain)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&vector_lock, flags);
- ret = __bind_irq_vector(irq, vector, domain);
- spin_unlock_irqrestore(&vector_lock, flags);
- return ret;
-}
-
-static void __clear_irq_vector(int irq)
-{
- int vector, cpu;
- cpumask_t domain;
- struct irq_cfg *cfg = &irq_cfg[irq];
-
- BUG_ON((unsigned)irq >= NR_IRQS);
- BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
- vector = cfg->vector;
- domain = cfg->domain;
- for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = -1;
- cfg->vector = IRQ_VECTOR_UNASSIGNED;
- cfg->domain = CPU_MASK_NONE;
- irq_status[irq] = IRQ_UNUSED;
- cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
-}
-
-static void clear_irq_vector(int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq);
- spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-int
-ia64_native_assign_irq_vector (int irq)
-{
- unsigned long flags;
- int vector, cpu;
- cpumask_t domain = CPU_MASK_NONE;
-
- vector = -ENOSPC;
-
- spin_lock_irqsave(&vector_lock, flags);
- for_each_online_cpu(cpu) {
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector >= 0)
- break;
- }
- if (vector < 0)
- goto out;
- if (irq == AUTO_ASSIGN)
- irq = vector;
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- out:
- spin_unlock_irqrestore(&vector_lock, flags);
- return vector;
-}
-
-void
-ia64_native_free_irq_vector (int vector)
-{
- if (vector < IA64_FIRST_DEVICE_VECTOR ||
- vector > IA64_LAST_DEVICE_VECTOR)
- return;
- clear_irq_vector(vector);
-}
-
-int
-reserve_irq_vector (int vector)
-{
- if (vector < IA64_FIRST_DEVICE_VECTOR ||
- vector > IA64_LAST_DEVICE_VECTOR)
- return -EINVAL;
- return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
-}
-
-/*
- * Initialize vector_irq on a new cpu. This function must be called
- * with vector_lock held.
- */
-void __setup_vector_irq(int cpu)
-{
- int irq, vector;
-
- /* Clear vector_irq */
- for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
- per_cpu(vector_irq, cpu)[vector] = -1;
- /* Mark the inuse vectors */
- for (irq = 0; irq < NR_IRQS; ++irq) {
- if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
- continue;
- vector = irq_to_vector(irq);
- per_cpu(vector_irq, cpu)[vector] = irq;
- }
-}
-
-#ifdef CONFIG_SMP
-
-static enum vector_domain_type {
- VECTOR_DOMAIN_NONE,
- VECTOR_DOMAIN_PERCPU
-} vector_domain_type = VECTOR_DOMAIN_NONE;
-
-static cpumask_t vector_allocation_domain(int cpu)
-{
- if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
- return *cpumask_of(cpu);
- return CPU_MASK_ALL;
-}
-
-static int __irq_prepare_move(int irq, int cpu)
-{
- struct irq_cfg *cfg = &irq_cfg[irq];
- int vector;
- cpumask_t domain;
-
- if (cfg->move_in_progress || cfg->move_cleanup_count)
- return -EBUSY;
- if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
- return -EINVAL;
- if (cpumask_test_cpu(cpu, &cfg->domain))
- return 0;
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector < 0)
- return -ENOSPC;
- cfg->move_in_progress = 1;
- cfg->old_domain = cfg->domain;
- cfg->vector = IRQ_VECTOR_UNASSIGNED;
- cfg->domain = CPU_MASK_NONE;
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- return 0;
-}
-
-int irq_prepare_move(int irq, int cpu)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&vector_lock, flags);
- ret = __irq_prepare_move(irq, cpu);
- spin_unlock_irqrestore(&vector_lock, flags);
- return ret;
-}
-
-void irq_complete_move(unsigned irq)
-{
- struct irq_cfg *cfg = &irq_cfg[irq];
- cpumask_t cleanup_mask;
- int i;
-
- if (likely(!cfg->move_in_progress))
- return;
-
- if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
- return;
-
- cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
- cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
- for_each_cpu(i, &cleanup_mask)
- ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
- cfg->move_in_progress = 0;
-}
-
-static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
-{
- int me = smp_processor_id();
- ia64_vector vector;
- unsigned long flags;
-
- for (vector = IA64_FIRST_DEVICE_VECTOR;
- vector < IA64_LAST_DEVICE_VECTOR; vector++) {
- int irq;
- struct irq_desc *desc;
- struct irq_cfg *cfg;
- irq = __this_cpu_read(vector_irq[vector]);
- if (irq < 0)
- continue;
-
- desc = irq_to_desc(irq);
- cfg = irq_cfg + irq;
- raw_spin_lock(&desc->lock);
- if (!cfg->move_cleanup_count)
- goto unlock;
-
- if (!cpumask_test_cpu(me, &cfg->old_domain))
- goto unlock;
-
- spin_lock_irqsave(&vector_lock, flags);
- __this_cpu_write(vector_irq[vector], -1);
- cpumask_clear_cpu(me, &vector_table[vector]);
- spin_unlock_irqrestore(&vector_lock, flags);
- cfg->move_cleanup_count--;
- unlock:
- raw_spin_unlock(&desc->lock);
- }
- return IRQ_HANDLED;
-}
-
-static int __init parse_vector_domain(char *arg)
-{
- if (!arg)
- return -EINVAL;
- if (!strcmp(arg, "percpu")) {
- vector_domain_type = VECTOR_DOMAIN_PERCPU;
- no_int_routing = 1;
- }
- return 0;
-}
-early_param("vector", parse_vector_domain);
-#else
-static cpumask_t vector_allocation_domain(int cpu)
-{
- return CPU_MASK_ALL;
-}
-#endif
-
-
-void destroy_and_reserve_irq(unsigned int irq)
-{
- unsigned long flags;
-
- irq_init_desc(irq);
- spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq);
- irq_status[irq] = IRQ_RSVD;
- spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-/*
- * Dynamic IRQ allocation and deallocation for MSI
- */
-int create_irq(void)
-{
- unsigned long flags;
- int irq, vector, cpu;
- cpumask_t domain = CPU_MASK_NONE;
-
- irq = vector = -ENOSPC;
- spin_lock_irqsave(&vector_lock, flags);
- for_each_online_cpu(cpu) {
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector >= 0)
- break;
- }
- if (vector < 0)
- goto out;
- irq = find_unassigned_irq();
- if (irq < 0)
- goto out;
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- out:
- spin_unlock_irqrestore(&vector_lock, flags);
- if (irq >= 0)
- irq_init_desc(irq);
- return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
- irq_init_desc(irq);
- clear_irq_vector(irq);
-}
-
-#ifdef CONFIG_SMP
-# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
-# define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH)
-#else
-# define IS_RESCHEDULE(vec) (0)
-# define IS_LOCAL_TLB_FLUSH(vec) (0)
-#endif
-/*
- * This is where the IVT branches when we get an external
- * interrupt. It dispatches to the correct hardware IRQ handler via a
- * function pointer.
- */
-void
-ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned long saved_tpr;
-
-#if IRQ_DEBUG
- {
- unsigned long bsp, sp;
-
- /*
- * Note: if the interrupt happened while executing in
- * the context switch routine (ia64_switch_to), we may
- * get a spurious stack overflow here. This is
- * because the register and the memory stack are not
- * switched atomically.
- */
- bsp = ia64_getreg(_IA64_REG_AR_BSP);
- sp = ia64_getreg(_IA64_REG_SP);
-
- if ((sp - bsp) < 1024) {
- static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
-
- if (__ratelimit(&ratelimit)) {
- printk("ia64_handle_irq: DANGER: less than "
- "1KB of free stack space!!\n"
- "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
- }
- }
- }
-#endif /* IRQ_DEBUG */
-
- /*
- * Always set TPR to limit maximum interrupt nesting depth to
- * 16 (without this, it would be ~240, which could easily lead
- * to kernel stack overflows).
- */
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
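-	/*
-	 * Keep servicing interrupts until the IVR returns the spurious
-	 * vector, i.e. until nothing further is pending.
-	 */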
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- int irq = local_vector_to_irq(vector);
-
- if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
- smp_local_flush_tlb();
- kstat_incr_irq_this_cpu(irq);
- } else if (unlikely(IS_RESCHEDULE(vector))) {
- scheduler_ipi();
- kstat_incr_irq_this_cpu(irq);
- } else {
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- if (unlikely(irq < 0)) {
- printk(KERN_ERR "%s: Unexpected interrupt "
- "vector %d on CPU %d is not mapped "
- "to any IRQ!\n", __func__, vector,
- smp_processor_id());
- } else
- generic_handle_irq(irq);
-
- /*
- * Disable interrupts and send EOI:
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- /*
- * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
- * handler needs to be able to wait for further keyboard interrupts, which can't
- * come through until ia64_eoi() has been done.
- */
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * This function emulates interrupt processing when a CPU is about to be
- * brought down.
- */
-void ia64_process_pending_intr(void)
-{
- ia64_vector vector;
- unsigned long saved_tpr;
- extern unsigned int vectors_in_migration[NR_IRQS];
-
- vector = ia64_get_ivr();
-
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
-
- /*
- * Perform normal interrupt style processing
- */
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- int irq = local_vector_to_irq(vector);
-
- if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
- smp_local_flush_tlb();
- kstat_incr_irq_this_cpu(irq);
- } else if (unlikely(IS_RESCHEDULE(vector))) {
- kstat_incr_irq_this_cpu(irq);
- } else {
- struct pt_regs *old_regs = set_irq_regs(NULL);
-
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- /*
-			 * Call the handler as it would have been called from a
-			 * real interrupt handler, passing NULL for pt_regs.
-			 * This could probably share code with ia64_handle_irq().
- */
- if (unlikely(irq < 0)) {
- printk(KERN_ERR "%s: Unexpected interrupt "
- "vector %d on CPU %d not being mapped "
- "to any IRQ!!\n", __func__, vector,
- smp_processor_id());
- } else {
- vectors_in_migration[irq]=0;
- generic_handle_irq(irq);
- }
- set_irq_regs(old_regs);
-
- /*
- * Disable interrupts and send EOI
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- irq_exit();
-}
-#endif
-
-
-#ifdef CONFIG_SMP
-
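-/*
- * The reschedule and local-TLB-flush IPIs are handled inline in
- * ia64_handle_irq(), so this handler is never expected to run.
- */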
-static irqreturn_t dummy_handler (int irq, void *dev_id)
-{
- BUG();
- return IRQ_NONE;
-}
-
-/*
- * KVM uses this interrupt to force a cpu out of guest mode
- */
-
-#endif
-
-void
-register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
- const char *name)
-{
- unsigned int irq;
-
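-	/* Per-CPU interrupts use an identity vector->irq mapping. */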
- irq = vec;
- BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
- irq_set_status_flags(irq, IRQ_PER_CPU);
- irq_set_chip(irq, &irq_type_ia64_lsapic);
- if (handler)
- if (request_irq(irq, handler, flags, name, NULL))
- pr_err("Failed to request irq %u (%s)\n", irq, name);
- irq_set_handler(irq, handle_percpu_irq);
-}
-
-void __init
-ia64_native_register_ipi(void)
-{
-#ifdef CONFIG_SMP
- register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
- register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
- register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
- "tlb_flush");
-#endif
-}
-
-void __init
-init_IRQ (void)
-{
- acpi_boot_init();
- ia64_register_ipi();
- register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
-#ifdef CONFIG_SMP
- if (vector_domain_type != VECTOR_DOMAIN_NONE) {
- register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
- smp_irq_move_cleanup_interrupt, 0,
- "irq_move");
- }
-#endif
-}
-
-void
-ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
-{
- void __iomem *ipi_addr;
- unsigned long ipi_data;
- unsigned long phys_cpu_id;
-
- phys_cpu_id = cpu_physical_id(cpu);
-
- /*
-	 * The physical cpu number is encoded as an 8-bit ID and an 8-bit EID.
- */
-
- ipi_data = (delivery_mode << 8) | (vector & 0xff);
- ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
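-	/*
-	 * The destination is encoded in the address written to: the ID/EID
-	 * pair selects the target CPU and bit 3 carries the redirection hint.
-	 */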
-
- writeq(ipi_data, ipi_addr);
-}
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
deleted file mode 100644
index 23bf4499a7..0000000000
--- a/arch/ia64/kernel/irq_lsapic.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * LSAPIC Interrupt Controller
- *
- * This takes care of interrupts that are generated by the CPU's
- * internal Streamlined Advanced Programmable Interrupt Controller
- * (LSAPIC), such as the ITC and IPI interrupts.
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/sched.h>
-#include <linux/irq.h>
-
-static unsigned int
-lsapic_noop_startup (struct irq_data *data)
-{
- return 0;
-}
-
-static void
-lsapic_noop (struct irq_data *data)
-{
- /* nothing to do... */
-}
-
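-/* Retrigger by redelivering the vector to the local CPU. */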
-static int lsapic_retrigger(struct irq_data *data)
-{
- ia64_resend_irq(data->irq);
-
- return 1;
-}
-
-struct irq_chip irq_type_ia64_lsapic = {
- .name = "LSAPIC",
- .irq_startup = lsapic_noop_startup,
- .irq_shutdown = lsapic_noop,
- .irq_enable = lsapic_noop,
- .irq_disable = lsapic_noop,
- .irq_ack = lsapic_noop,
- .irq_retrigger = lsapic_retrigger,
-};
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
deleted file mode 100644
index da90c49df6..0000000000
--- a/arch/ia64/kernel/ivt.S
+++ /dev/null
@@ -1,1688 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/ia64/kernel/ivt.S
- *
- * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 2000, 2002-2003 Intel Co
- * Asit Mallick <asit.k.mallick@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Kenneth Chen <kenneth.w.chen@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- *
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
- * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- * Xen paravirtualization
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * pv_ops.
- * Yaozu (Eddie) Dong <eddie.dong@intel.com>
- */
-/*
- * This file defines the interruption vector table used by the CPU.
- * It does not include one entry per possible cause of interruption.
- *
- * The first 20 entries of the table contain 64 bundles each while the
- * remaining 48 entries contain only 16 bundles each.
- *
- * The 64 bundles are used to allow inlining the whole handler for critical
- * interruptions like TLB misses.
- *
- * For each entry, the comment is as follows:
- *
- * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- * entry offset ----/ / / / /
- * entry number ---------/ / / /
- * size of the entry -------------/ / /
- * vector name -------------------------------------/ /
- * interruptions triggering this vector ----------------------/
- *
- * The table is 32KB in size and must be aligned on a 32KB boundary.
- * (The CPU ignores the 15 lower bits of the address)
- *
- * Table is based upon EAS2.6 (Oct 1999)
- */
-
-#include <linux/export.h>
-#include <linux/pgtable.h>
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-#include <asm/kregs.h>
-#include <asm/asm-offsets.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-#include <asm/errno.h>
-
-#if 0
-# define PSR_DEFAULT_BITS psr.ac
-#else
-# define PSR_DEFAULT_BITS 0
-#endif
-
-#if 0
- /*
- * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
- * needed for something else before enabling this...
- */
-# define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
-#else
-# define DBG_FAULT(i)
-#endif
-
-#include "minstate.h"
-
-#define FAULT(n) \
- mov r31=pr; \
- mov r19=n;; /* prepare to save predicates */ \
- br.sptk.many dispatch_to_fault_handler
-
- .section .text..ivt,"ax"
-
- .align 32768 // align on 32KB boundary
- .global ia64_ivt
- EXPORT_SYMBOL(ia64_ivt)
-ia64_ivt:
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-ENTRY(vhpt_miss)
- DBG_FAULT(0)
- /*
- * The VHPT vector is invoked when the TLB entry for the virtual page table
- * is missing. This happens only as a result of a previous
- * (the "original") TLB miss, which may either be caused by an instruction
- * fetch or a data access (or non-access).
- *
- * What we do here is normal TLB miss handling for the _original_ miss,
- * followed by inserting the TLB entry for the virtual page table page
- * that the VHPT walker was attempting to access. The latter gets
- * inserted as long as the page table entries above the pte level have
- * valid mappings for the faulting address. The TLB entry for the
- * original miss gets inserted only if the pte entry indicates that the
- * page is present.
- *
- * do_page_fault gets invoked in the following cases:
- * - the faulting virtual address uses unimplemented address bits
- * - the faulting virtual address has no valid page table mapping
- */
- MOV_FROM_IFA(r16) // get address that caused the TLB miss
-#ifdef CONFIG_HUGETLB_PAGE
- movl r18=PAGE_SHIFT
- MOV_FROM_ITIR(r25)
-#endif
- ;;
- RSM_PSR_DT // use physical addressing for data
- mov r31=pr // save the predicate registers
- mov r19=IA64_KR(PT_BASE) // get page table base address
- shl r21=r16,3 // shift bit 60 into sign bit
- shr.u r17=r16,61 // get the region number into r17
- ;;
- shr.u r22=r21,3
-#ifdef CONFIG_HUGETLB_PAGE
- extr.u r26=r25,2,6
- ;;
- cmp.ne p8,p0=r18,r26
- sub r27=r26,r18
- ;;
-(p8) dep r25=r18,r25,2,6
-(p8) shr r22=r22,r27
-#endif
- ;;
-	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
-	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bits
- ;;
-(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
-
- srlz.d
- LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
-
- .pred.rel "mutex", p6, p7
-(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
- ;;
-(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
-(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
- cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#if CONFIG_PGTABLE_LEVELS == 4
- shr.u r28=r22,PUD_SHIFT // shift pud index into position
-#else
- shr.u r18=r22,PMD_SHIFT // shift pmd index into position
-#endif
- ;;
- ld8 r17=[r17] // get *pgd (may be 0)
- ;;
-(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
-#if CONFIG_PGTABLE_LEVELS == 4
- dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
- ;;
- shr.u r18=r22,PMD_SHIFT // shift pmd index into position
-(p7) ld8 r29=[r28] // get *pud (may be 0)
- ;;
-(p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL?
- dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
-#else
- dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr)
-#endif
- ;;
-(p7) ld8 r20=[r17] // get *pmd (may be 0)
- shr.u r19=r22,PAGE_SHIFT // shift pte index into position
- ;;
-(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL?
- dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
- ;;
-(p7) ld8 r18=[r21] // read *pte
- MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss
- ;;
-(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
- MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss
- ;; // avoid RAW on p7
-(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
- dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
- ;;
- ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and
- // insert the data TLB entry
-(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
- MOV_TO_IFA(r22, r24)
-
-#ifdef CONFIG_HUGETLB_PAGE
- MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT
-#endif
-
- /*
- * Now compute and insert the TLB entry for the virtual page table. We never
- * execute in a page table page so there is no need to set the exception deferral
- * bit.
- */
- adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
- ;;
- ITC_D(p7, r24, r25)
- ;;
-#ifdef CONFIG_SMP
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
-
- /*
-	 * Re-check the pagetable entries. If they changed, we may have received a ptc.g
- * between reading the pagetable and the "itc". If so, flush the entry we
- * inserted and retry. At this point, we have:
- *
- * r28 = equivalent of pud_offset(pgd, ifa)
- * r17 = equivalent of pmd_offset(pud, ifa)
- * r21 = equivalent of pte_offset(pmd, ifa)
- *
- * r29 = *pud
- * r20 = *pmd
- * r18 = *pte
- */
- ld8 r25=[r21] // read *pte again
- ld8 r26=[r17] // read *pmd again
-#if CONFIG_PGTABLE_LEVELS == 4
- ld8 r19=[r28] // read *pud again
-#endif
- cmp.ne p6,p7=r0,r0
- ;;
- cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change
-#if CONFIG_PGTABLE_LEVELS == 4
- cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change
-#endif
- mov r27=PAGE_SHIFT<<2
- ;;
-(p6) ptc.l r22,r27 // purge PTE page translation
-(p7) cmp.ne.or.andcm p6,p7=r25,r18 // did *pte change
- ;;
-(p6) ptc.l r16,r27 // purge translation
-#endif
-
- mov pr=r31,-1 // restore predicate registers
- RFI
-END(vhpt_miss)
-
- .org ia64_ivt+0x400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-ENTRY(itlb_miss)
- DBG_FAULT(1)
- /*
- * The ITLB handler accesses the PTE via the virtually mapped linear
- * page table. If a nested TLB miss occurs, we switch into physical
- * mode, walk the page table, and then re-execute the PTE read and
- * go on normally after that.
- */
- MOV_FROM_IFA(r16) // get virtual address
- mov r29=b0 // save b0
- mov r31=pr // save predicates
-.itlb_fault:
- MOV_FROM_IHA(r17) // get virtual address of PTE
- movl r30=1f // load nested fault continuation point
- ;;
-1: ld8 r18=[r17] // read *pte
- ;;
- mov b0=r29
- tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
-(p6) br.cond.spnt page_fault
- ;;
- ITC_I(p0, r18, r19)
- ;;
-#ifdef CONFIG_SMP
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
-
- ld8 r19=[r17] // read *pte again and see if same
- mov r20=PAGE_SHIFT<<2 // setup page size for purge
- ;;
- cmp.ne p7,p0=r18,r19
- ;;
-(p7) ptc.l r16,r20
-#endif
- mov pr=r31,-1
- RFI
-END(itlb_miss)
-
- .org ia64_ivt+0x0800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-ENTRY(dtlb_miss)
- DBG_FAULT(2)
- /*
- * The DTLB handler accesses the PTE via the virtually mapped linear
- * page table. If a nested TLB miss occurs, we switch into physical
- * mode, walk the page table, and then re-execute the PTE read and
- * go on normally after that.
- */
- MOV_FROM_IFA(r16) // get virtual address
- mov r29=b0 // save b0
- mov r31=pr // save predicates
-dtlb_fault:
- MOV_FROM_IHA(r17) // get virtual address of PTE
- movl r30=1f // load nested fault continuation point
- ;;
-1: ld8 r18=[r17] // read *pte
- ;;
- mov b0=r29
- tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
-(p6) br.cond.spnt page_fault
- ;;
- ITC_D(p0, r18, r19)
- ;;
-#ifdef CONFIG_SMP
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
-
- ld8 r19=[r17] // read *pte again and see if same
- mov r20=PAGE_SHIFT<<2 // setup page size for purge
- ;;
- cmp.ne p7,p0=r18,r19
- ;;
-(p7) ptc.l r16,r20
-#endif
- mov pr=r31,-1
- RFI
-END(dtlb_miss)
-
- .org ia64_ivt+0x0c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-ENTRY(alt_itlb_miss)
- DBG_FAULT(3)
- MOV_FROM_IFA(r16) // get address that caused the TLB miss
- movl r17=PAGE_KERNEL
- MOV_FROM_IPSR(p0, r21)
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r31=pr
- ;;
-#ifdef CONFIG_DISABLE_VHPT
-	shr.u r22=r16,61			// get the region number into r22
- ;;
- cmp.gt p8,p0=6,r22 // user mode
- ;;
- THASH(p8, r17, r16, r23)
- ;;
- MOV_TO_IHA(p8, r17, r23)
-(p8) mov r29=b0 // save b0
-(p8) br.cond.dptk .itlb_fault
-#endif
- extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- shr.u r18=r16,57 // move address bit 61 to bit 4
- ;;
- andcm r18=0x10,r18 // bit 4=~address-bit(61)
- cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
- or r19=r17,r19 // insert PTE control bits into r19
- ;;
- or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
-(p8) br.cond.spnt page_fault
- ;;
- ITC_I(p0, r19, r18) // insert the TLB entry
- mov pr=r31,-1
- RFI
-END(alt_itlb_miss)
-
- .org ia64_ivt+0x1000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-ENTRY(alt_dtlb_miss)
- DBG_FAULT(4)
- MOV_FROM_IFA(r16) // get address that caused the TLB miss
- movl r17=PAGE_KERNEL
- MOV_FROM_ISR(r20)
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- MOV_FROM_IPSR(p0, r21)
- mov r31=pr
- mov r24=PERCPU_ADDR
- ;;
-#ifdef CONFIG_DISABLE_VHPT
-	shr.u r22=r16,61			// get the region number into r22
- ;;
- cmp.gt p8,p0=6,r22 // access to region 0-5
- ;;
- THASH(p8, r17, r16, r25)
- ;;
- MOV_TO_IHA(p8, r17, r25)
-(p8) mov r29=b0 // save b0
-(p8) br.cond.dptk dtlb_fault
-#endif
- cmp.ge p10,p11=r16,r24 // access to per_cpu_data?
- tbit.z p12,p0=r16,61 // access to region 6?
- mov r25=PERCPU_PAGE_SHIFT << 2
- mov r26=PERCPU_PAGE_SIZE
- nop.m 0
- nop.b 0
- ;;
-(p10) mov r19=IA64_KR(PER_CPU_DATA)
-(p11) and r19=r19,r16 // clear non-ppn fields
- extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
- and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
- tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
- ;;
-(p10) sub r19=r19,r26
- MOV_TO_ITIR(p10, r25, r24)
- cmp.ne p8,p0=r0,r23
-(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
-(p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr
-(p8) br.cond.spnt page_fault
-
- dep r21=-1,r21,IA64_PSR_ED_BIT,1
- ;;
- or r19=r19,r17 // insert PTE control bits into r19
- MOV_TO_IPSR(p6, r21, r24)
- ;;
- ITC_D(p7, r19, r18) // insert the TLB entry
- mov pr=r31,-1
- RFI
-END(alt_dtlb_miss)
-
- .org ia64_ivt+0x1400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-ENTRY(nested_dtlb_miss)
- /*
- * In the absence of kernel bugs, we get here when the virtually mapped linear
- * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
- * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
- * table is missing, a nested TLB miss fault is triggered and control is
-	 * transferred to this point. When this happens, we look up the pte for the
- * faulting address by walking the page table in physical mode and return to the
- * continuation point passed in register r30 (or call page_fault if the address is
- * not mapped).
- *
- * Input: r16: faulting address
- * r29: saved b0
- * r30: continuation address
- * r31: saved pr
- *
- * Output: r17: physical address of PTE of faulting address
- * r29: saved b0
- * r30: continuation address
- * r31: saved pr
- *
- * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared)
- */
- RSM_PSR_DT // switch to using physical data addressing
- mov r19=IA64_KR(PT_BASE) // get the page table base address
- shl r21=r16,3 // shift bit 60 into sign bit
- MOV_FROM_ITIR(r18)
- ;;
- shr.u r17=r16,61 // get the region number into r17
- extr.u r18=r18,2,6 // get the faulting page size
- ;;
- cmp.eq p6,p7=5,r17 // is faulting address in region 5?
- add r22=-PAGE_SHIFT,r18 // adjustment for hugetlb address
- add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
- ;;
- shr.u r22=r16,r22
- shr.u r18=r16,r18
-(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
-
- srlz.d
- LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
-
- .pred.rel "mutex", p6, p7
-(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
- ;;
-(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
-(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
- cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#if CONFIG_PGTABLE_LEVELS == 4
- shr.u r18=r22,PUD_SHIFT // shift pud index into position
-#else
- shr.u r18=r22,PMD_SHIFT // shift pmd index into position
-#endif
- ;;
- ld8 r17=[r17] // get *pgd (may be 0)
- ;;
-(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
- dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr)
- ;;
-#if CONFIG_PGTABLE_LEVELS == 4
-(p7) ld8 r17=[r17] // get *pud (may be 0)
- shr.u r18=r22,PMD_SHIFT // shift pmd index into position
- ;;
-(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pud_present(*pud) == NULL?
- dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
- ;;
-#endif
-(p7) ld8 r17=[r17] // get *pmd (may be 0)
- shr.u r19=r22,PAGE_SHIFT // shift pte index into position
- ;;
-(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pmd_present(*pmd) == NULL?
- dep r17=r19,r17,3,(PAGE_SHIFT-3) // r17=pte_offset(pmd,addr);
-(p6) br.cond.spnt page_fault
- mov b0=r30
- br.sptk.many b0 // return to continuation point
-END(nested_dtlb_miss)
-
- .org ia64_ivt+0x1800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-ENTRY(ikey_miss)
- DBG_FAULT(6)
- FAULT(6)
-END(ikey_miss)
-
- .org ia64_ivt+0x1c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-ENTRY(dkey_miss)
- DBG_FAULT(7)
- FAULT(7)
-END(dkey_miss)
-
- .org ia64_ivt+0x2000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-ENTRY(dirty_bit)
- DBG_FAULT(8)
- /*
- * What we do here is to simply turn on the dirty bit in the PTE. We need to
- * update both the page-table and the TLB entry. To efficiently access the PTE,
- * we address it through the virtual page table. Most likely, the TLB entry for
- * the relevant virtual page table page is still present in the TLB so we can
- * normally do this without additional TLB misses. In case the necessary virtual
- * page table TLB entry isn't present, we take a nested TLB miss hit where we look
- * up the physical address of the L3 PTE and then continue at label 1 below.
- */
- MOV_FROM_IFA(r16) // get the address that caused the fault
- movl r30=1f // load continuation point in case of nested fault
- ;;
- THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
- mov r29=b0 // save b0 in case of nested fault
- mov r31=pr // save pr
-#ifdef CONFIG_SMP
- mov r28=ar.ccv // save ar.ccv
- ;;
-1: ld8 r18=[r17]
- ;; // avoid RAW on r18
- mov ar.ccv=r18 // set compare value for cmpxchg
- or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
- tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
- ;;
-(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present
- mov r24=PAGE_SHIFT<<2
- ;;
-(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
- ;;
- ITC_D(p6, r25, r18) // install updated PTE
- ;;
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
-
- ld8 r18=[r17] // read PTE again
- ;;
- cmp.eq p6,p7=r18,r25 // is it same as the newly installed
- ;;
-(p7) ptc.l r16,r24
- mov b0=r29 // restore b0
- mov ar.ccv=r28
-#else
- ;;
-1: ld8 r18=[r17]
- ;; // avoid RAW on r18
- or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
- mov b0=r29 // restore b0
- ;;
- st8 [r17]=r18 // store back updated PTE
- ITC_D(p0, r18, r16) // install updated PTE
-#endif
- mov pr=r31,-1 // restore pr
- RFI
-END(dirty_bit)
-
- .org ia64_ivt+0x2400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-ENTRY(iaccess_bit)
- DBG_FAULT(9)
- // Like Entry 8, except for instruction access
- MOV_FROM_IFA(r16) // get the address that caused the fault
- movl r30=1f // load continuation point in case of nested fault
- mov r31=pr // save predicates
-#ifdef CONFIG_ITANIUM
- /*
- * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
- */
- MOV_FROM_IPSR(p0, r17)
- ;;
- MOV_FROM_IIP(r18)
- tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
- ;;
-(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
-#endif /* CONFIG_ITANIUM */
- ;;
- THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
-	mov r29=b0				// save b0 in case of nested fault
-#ifdef CONFIG_SMP
- mov r28=ar.ccv // save ar.ccv
- ;;
-1: ld8 r18=[r17]
- ;;
- mov ar.ccv=r18 // set compare value for cmpxchg
- or r25=_PAGE_A,r18 // set the accessed bit
- tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
- ;;
-(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present
- mov r24=PAGE_SHIFT<<2
- ;;
-(p6) cmp.eq p6,p7=r26,r18 // Only if page present
- ;;
- ITC_I(p6, r25, r26) // install updated PTE
- ;;
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
-
- ld8 r18=[r17] // read PTE again
- ;;
- cmp.eq p6,p7=r18,r25 // is it same as the newly installed
- ;;
-(p7) ptc.l r16,r24
- mov b0=r29 // restore b0
- mov ar.ccv=r28
-#else /* !CONFIG_SMP */
- ;;
-1: ld8 r18=[r17]
- ;;
- or r18=_PAGE_A,r18 // set the accessed bit
- mov b0=r29 // restore b0
- ;;
- st8 [r17]=r18 // store back updated PTE
- ITC_I(p0, r18, r16) // install updated PTE
-#endif /* !CONFIG_SMP */
- mov pr=r31,-1
- RFI
-END(iaccess_bit)
-
- .org ia64_ivt+0x2800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-ENTRY(daccess_bit)
- DBG_FAULT(10)
- // Like Entry 8, except for data access
- MOV_FROM_IFA(r16) // get the address that caused the fault
- movl r30=1f // load continuation point in case of nested fault
- ;;
- THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
- mov r31=pr
-	mov r29=b0				// save b0 in case of nested fault
-#ifdef CONFIG_SMP
- mov r28=ar.ccv // save ar.ccv
- ;;
-1: ld8 r18=[r17]
- ;; // avoid RAW on r18
- mov ar.ccv=r18 // set compare value for cmpxchg
-	or r25=_PAGE_A,r18			// set the accessed bit
- tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
- ;;
-(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present
- mov r24=PAGE_SHIFT<<2
- ;;
-(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
- ;;
- ITC_D(p6, r25, r26) // install updated PTE
- /*
-	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
- * cannot possibly affect the following loads:
- */
- dv_serialize_data
- ;;
- ld8 r18=[r17] // read PTE again
- ;;
- cmp.eq p6,p7=r18,r25 // is it same as the newly installed
- ;;
-(p7) ptc.l r16,r24
- mov ar.ccv=r28
-#else
- ;;
-1: ld8 r18=[r17]
- ;; // avoid RAW on r18
- or r18=_PAGE_A,r18 // set the accessed bit
- ;;
- st8 [r17]=r18 // store back updated PTE
- ITC_D(p0, r18, r16) // install updated PTE
-#endif
- mov b0=r29 // restore b0
- mov pr=r31,-1
- RFI
-END(daccess_bit)
-
- .org ia64_ivt+0x2c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-ENTRY(break_fault)
- /*
- * The streamlined system call entry/exit paths only save/restore the initial part
- * of pt_regs. This implies that the callers of system-calls must adhere to the
- * normal procedure calling conventions.
- *
- * Registers to be saved & restored:
- * CR registers: cr.ipsr, cr.iip, cr.ifs
- * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
- * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
- * Registers to be restored only:
- * r8-r11: output value from the system call.
- *
- * During system call exit, scratch registers (including r15) are modified/cleared
- * to prevent leaking bits from kernel to user level.
- */
- DBG_FAULT(11)
- mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
- MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
- mov r31=pr // I0 (2 cyc)
-
- MOV_FROM_IIM(r17) // M2 (2 cyc)
- mov.m r27=ar.rsc // M2 (12 cyc)
- mov r18=__IA64_BREAK_SYSCALL // A
-
- mov.m ar.rsc=0 // M2
- mov.m r21=ar.fpsr // M2 (12 cyc)
- mov r19=b6 // I0 (2 cyc)
- ;;
- mov.m r23=ar.bspstore // M2 (12 cyc)
- mov.m r24=ar.rnat // M2 (5 cyc)
- mov.i r26=ar.pfs // I0 (2 cyc)
-
- invala // M0|1
- nop.m 0 // M
- mov r20=r1 // A save r1
-
- nop.m 0
- movl r30=sys_call_table // X
-
- MOV_FROM_IIP(r28) // M2 (2 cyc)
- cmp.eq p0,p7=r18,r17 // I0 is this a system call?
-(p7) br.cond.spnt non_syscall // B no ->
- //
- // From this point on, we are definitely on the syscall-path
- // and we can use (non-banked) scratch registers.
- //
-///////////////////////////////////////////////////////////////////////
- mov r1=r16 // A move task-pointer to "addl"-addressable reg
- mov r2=r16 // A setup r2 for ia64_syscall_setup
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
-
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
- adds r15=-1024,r15 // A subtract 1024 from syscall number
- mov r3=NR_syscalls - 1
- ;;
- ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
- ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
- extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
-
- shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
- addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
- cmp.leu p6,p7=r15,r3 // A syscall number in range?
- ;;
-
- lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
-(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
- tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
-
- mov.m ar.bspstore=r22 // M2 switch to kernel RBS
- cmp.eq p8,p9=2,r8 // A isr.ei==2?
- ;;
-
-(p8) mov r8=0 // A clear ei to 0
-(p7) movl r30=sys_ni_syscall // X
-
-(p8) adds r28=16,r28 // A switch cr.iip to next bundle
-(p9) adds r8=1,r8 // A increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- ;;
- mov b6=r30 // I0 setup syscall handler branch reg early
-#else
- nop.i 0
- ;;
-#endif
-
- mov.m r25=ar.unat // M2 (5 cyc)
- dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
- adds r15=1024,r15 // A restore original syscall number
- //
- // If any of the above loads miss in L1D, we'll stall here until
- // the data arrives.
- //
-///////////////////////////////////////////////////////////////////////
- st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
-#else
- mov b6=r30 // I0 setup syscall handler branch reg early
-#endif
- cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
-
- and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
- mov r18=ar.bsp // M2 (12 cyc)
-(pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS
- ;;
-.back_from_break_fixup:
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
- cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
- br.call.sptk.many b7=ia64_syscall_setup // B
-1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- // mov.m r30=ar.itc is called in advance, and r13 is current
- add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
- add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
-(pKStk) br.cond.spnt .skip_accounting // B unlikely skip
- ;;
- ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp
- ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave
- ;;
- ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime
- ld8 r21=[r17] // M cumulated utime
- sub r22=r19,r18 // A stime before leave
- ;;
- st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp
- sub r18=r30,r19 // A elapsed time in user
- ;;
- add r20=r20,r22 // A sum stime
- add r21=r21,r18 // A sum utime
- ;;
- st8 [r16]=r20 // M update stime
- st8 [r17]=r21 // M update utime
- ;;
-.skip_accounting:
-#endif
- mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
- nop 0
- BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
- ;;
-
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
- // M0 ensure interruption collection is on
- movl r3=ia64_ret_from_syscall // X
- ;;
- mov rp=r3 // I0 set the real return addr
-(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
-
- SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
-(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
- br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
- // NOT REACHED
-///////////////////////////////////////////////////////////////////////
- // On entry, we optimistically assumed that we're coming from user-space.
- // For the rare cases where a system-call is done from within the kernel,
- // we fix things up at this point:
-.break_fixup:
- add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure
- mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
- ;;
- mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
- br.cond.sptk .back_from_break_fixup
-END(break_fault)
-
- .org ia64_ivt+0x3000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-ENTRY(interrupt)
-	/* The interrupt handler has become too big to fit in this area. */
- br.sptk.many __interrupt
-END(interrupt)
-
- .org ia64_ivt+0x3400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3400 Entry 13 (size 64 bundles) Reserved
- DBG_FAULT(13)
- FAULT(13)
-
- .org ia64_ivt+0x3800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3800 Entry 14 (size 64 bundles) Reserved
- DBG_FAULT(14)
- FAULT(14)
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
-	 * fault ever gets "unreserved", simply move the following code to a more
- * suitable spot...
- *
- * ia64_syscall_setup() is a separate subroutine so that it can
- * allocate stacked registers so it can safely demine any
- * potential NaT values from the input registers.
- *
- * On entry:
- * - executing on bank 0 or bank 1 register set (doesn't matter)
- * - r1: stack pointer
- * - r2: current task pointer
- * - r3: preserved
- * - r11: original contents (saved ar.pfs to be saved)
- * - r12: original contents (sp to be saved)
- * - r13: original contents (tp to be saved)
- * - r15: original contents (syscall # to be saved)
- * - r18: saved bsp (after switching to kernel stack)
- * - r19: saved b6
- * - r20: saved r1 (gp)
- * - r21: saved ar.fpsr
- * - r22: kernel's register backing store base (krbs_base)
- * - r23: saved ar.bspstore
- * - r24: saved ar.rnat
- * - r25: saved ar.unat
- * - r26: saved ar.pfs
- * - r27: saved ar.rsc
- * - r28: saved cr.iip
- * - r29: saved cr.ipsr
- * - r30: ar.itc for accounting (don't touch)
- * - r31: saved pr
- * - b0: original contents (to be saved)
- * On exit:
- * - p10: TRUE if syscall is invoked with more than 8 out
- * registers or r15's Nat is true
- * - r1: kernel's gp
- * - r3: preserved (same as on entry)
- * - r8: -EINVAL if p10 is true
- * - r12: points to kernel stack
- * - r13: points to current task
- * - r14: preserved (same as on entry)
- * - p13: preserved
- * - p15: TRUE if interrupts need to be re-enabled
- * - ar.fpsr: set to kernel settings
- * - b6: preserved (same as on entry)
- */
-GLOBAL_ENTRY(ia64_syscall_setup)
-#if PT(B6) != 0
-# error This code assumes that b6 is the first field in pt_regs.
-#endif
- st8 [r1]=r19 // save b6
- add r16=PT(CR_IPSR),r1 // initialize first base pointer
- add r17=PT(R11),r1 // initialize second base pointer
- ;;
- alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
- st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
- tnat.nz p8,p0=in0
-
- st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
- tnat.nz p9,p0=in1
-(pKStk) mov r18=r0 // make sure r18 isn't NaT
- ;;
-
- st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
- st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
- mov r28=b0 // save b0 (2 cyc)
- ;;
-
- st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
- dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
-(p8) mov in0=-1
- ;;
-
- st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
- extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
- and r8=0x7f,r19 // A // get sof of ar.pfs
-
- st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
-(p9) mov in1=-1
- ;;
-
-(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
- tnat.nz p10,p0=in2
- add r11=8,r11
- ;;
-(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
-(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
- tnat.nz p11,p0=in3
- ;;
-(p10) mov in2=-1
- tnat.nz p12,p0=in4 // [I0]
-(p11) mov in3=-1
- ;;
-(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
-(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
- shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
- ;;
- st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
- st8 [r17]=r28,PT(R1)-PT(B0) // save b0
- tnat.nz p13,p0=in5 // [I0]
- ;;
- st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
- st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
-(p12) mov in4=-1
- ;;
-
-.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
-.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
-(p13) mov in5=-1
- ;;
- st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
- tnat.nz p13,p0=in6
- cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
- ;;
- mov r8=1
-(p9) tnat.nz p10,p0=r15
- adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
-
- st8.spill [r17]=r15 // save r15
- tnat.nz p8,p0=in7
- nop.i 0
-
- mov r13=r2 // establish `current'
- movl r1=__gp // establish kernel global pointer
- ;;
- st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
-(p13) mov in6=-1
-(p8) mov in7=-1
-
- cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
- movl r17=FPSR_DEFAULT
- ;;
- mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
-(p10) mov r8=-EINVAL
- br.ret.sptk.many b7
-END(ia64_syscall_setup)
-
- .org ia64_ivt+0x3c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3c00 Entry 15 (size 64 bundles) Reserved
- DBG_FAULT(15)
- FAULT(15)
-
- .org ia64_ivt+0x4000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4000 Entry 16 (size 64 bundles) Reserved
- DBG_FAULT(16)
- FAULT(16)
-
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
- /*
- * There is no particular reason for this code to be here, other than
- * that there happens to be space here that would go unused otherwise.
-	 * If this fault ever gets "unreserved", simply move the following
- * code to a more suitable spot...
- *
- * account_sys_enter is called from SAVE_MIN* macros if accounting is
- * enabled and if the macro is entered from user mode.
- */
-GLOBAL_ENTRY(account_sys_enter)
- // mov.m r20=ar.itc is called in advance, and r13 is current
- add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
- add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
- ;;
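-	// Fold the kernel time between the last stamp and the last kernel
-	// exit into stime, and the time since then (spent in user mode)
-	// into utime.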
- ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
-	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time when we last left the kernel
- ;;
- ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
- ld8 r21=[r17] // cumulated utime
-	sub r22=r19,r18				// stime before leaving the kernel
- ;;
- st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp
- sub r18=r20,r19 // elapsed time in user mode
- ;;
- add r23=r23,r22 // sum stime
- add r21=r21,r18 // sum utime
- ;;
- st8 [r16]=r23 // update stime
- st8 [r17]=r21 // update utime
- ;;
- br.ret.sptk.many rp
-END(account_sys_enter)
-#endif
-
- .org ia64_ivt+0x4400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4400 Entry 17 (size 64 bundles) Reserved
- DBG_FAULT(17)
- FAULT(17)
-
- .org ia64_ivt+0x4800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4800 Entry 18 (size 64 bundles) Reserved
- DBG_FAULT(18)
- FAULT(18)
-
- .org ia64_ivt+0x4c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4c00 Entry 19 (size 64 bundles) Reserved
- DBG_FAULT(19)
- FAULT(19)
-
-//
-// --- End of long entries, Beginning of short entries
-//
-
- .org ia64_ivt+0x5000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
-ENTRY(page_not_present)
- DBG_FAULT(20)
- MOV_FROM_IFA(r16)
- RSM_PSR_DT
- /*
- * The Linux page fault handler doesn't expect non-present pages to be in
- * the TLB. Flush the existing entry now, so we meet that expectation.
- */
- mov r17=PAGE_SHIFT<<2
- ;;
- ptc.l r16,r17
- ;;
- mov r31=pr
- srlz.d
- br.sptk.many page_fault
-END(page_not_present)
-
- .org ia64_ivt+0x5100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
-ENTRY(key_permission)
- DBG_FAULT(21)
- MOV_FROM_IFA(r16)
- RSM_PSR_DT
- mov r31=pr
- ;;
- srlz.d
- br.sptk.many page_fault
-END(key_permission)
-
- .org ia64_ivt+0x5200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-ENTRY(iaccess_rights)
- DBG_FAULT(22)
- MOV_FROM_IFA(r16)
- RSM_PSR_DT
- mov r31=pr
- ;;
- srlz.d
- br.sptk.many page_fault
-END(iaccess_rights)
-
- .org ia64_ivt+0x5300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-ENTRY(daccess_rights)
- DBG_FAULT(23)
- MOV_FROM_IFA(r16)
- RSM_PSR_DT
- mov r31=pr
- ;;
- srlz.d
- br.sptk.many page_fault
-END(daccess_rights)
-
- .org ia64_ivt+0x5400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-ENTRY(general_exception)
- DBG_FAULT(24)
- MOV_FROM_ISR(r16)
- mov r31=pr
- ;;
- cmp4.eq p6,p0=0,r16
-(p6) br.sptk.many dispatch_illegal_op_fault
- ;;
- mov r19=24 // fault number
- br.sptk.many dispatch_to_fault_handler
-END(general_exception)
-
- .org ia64_ivt+0x5500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-ENTRY(disabled_fp_reg)
- DBG_FAULT(25)
- rsm psr.dfh // ensure we can access fph
- ;;
- srlz.d
- mov r31=pr
- mov r19=25
- br.sptk.many dispatch_to_fault_handler
-END(disabled_fp_reg)
-
- .org ia64_ivt+0x5600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-ENTRY(nat_consumption)
- DBG_FAULT(26)
-
- MOV_FROM_IPSR(p0, r16)
- MOV_FROM_ISR(r17)
- mov r31=pr // save PR
- ;;
-	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
- tbit.z p6,p0=r17,IA64_ISR_NA_BIT
- ;;
- cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
- dep r16=-1,r16,IA64_PSR_ED_BIT,1
-(p6)	br.cond.spnt 1f				// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
- ;;
- MOV_TO_IPSR(p0, r16, r18)
- mov pr=r31,-1
- ;;
- RFI
-
-1: mov pr=r31,-1
- ;;
- FAULT(26)
-END(nat_consumption)
-
- .org ia64_ivt+0x5700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-ENTRY(speculation_vector)
- DBG_FAULT(27)
- /*
- * A [f]chk.[as] instruction needs to take the branch to the recovery code but
- * this part of the architecture is not implemented in hardware on some CPUs, such
- * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
- * the relative target (not yet sign extended). So after sign extending it we
- * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
- * i.e., the slot to restart into.
- *
-	 * cr.iim contains zero_ext(imm21)
- */
- MOV_FROM_IIM(r18)
- ;;
- MOV_FROM_IIP(r17)
- shl r18=r18,43 // put sign bit in position (43=64-21)
- ;;
-
- MOV_FROM_IPSR(p0, r16)
- shr r18=r18,39 // sign extend (39=43-4)
- ;;
-
- add r17=r17,r18 // now add the offset
- ;;
- MOV_TO_IIP(r17, r19)
- dep r16=0,r16,41,2 // clear EI
- ;;
-
- MOV_TO_IPSR(p0, r16, r19)
- ;;
-
- RFI
-END(speculation_vector)
-
- .org ia64_ivt+0x5800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5800 Entry 28 (size 16 bundles) Reserved
- DBG_FAULT(28)
- FAULT(28)
-
- .org ia64_ivt+0x5900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-ENTRY(debug_vector)
- DBG_FAULT(29)
- FAULT(29)
-END(debug_vector)
-
- .org ia64_ivt+0x5a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-ENTRY(unaligned_access)
- DBG_FAULT(30)
- mov r31=pr // prepare to save predicates
- ;;
- br.sptk.many dispatch_unaligned_handler
-END(unaligned_access)
-
- .org ia64_ivt+0x5b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-ENTRY(unsupported_data_reference)
- DBG_FAULT(31)
- FAULT(31)
-END(unsupported_data_reference)
-
- .org ia64_ivt+0x5c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
-ENTRY(floating_point_fault)
- DBG_FAULT(32)
- FAULT(32)
-END(floating_point_fault)
-
- .org ia64_ivt+0x5d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-ENTRY(floating_point_trap)
- DBG_FAULT(33)
- FAULT(33)
-END(floating_point_trap)
-
- .org ia64_ivt+0x5e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-ENTRY(lower_privilege_trap)
- DBG_FAULT(34)
- FAULT(34)
-END(lower_privilege_trap)
-
- .org ia64_ivt+0x5f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-ENTRY(taken_branch_trap)
- DBG_FAULT(35)
- FAULT(35)
-END(taken_branch_trap)
-
- .org ia64_ivt+0x6000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-ENTRY(single_step_trap)
- DBG_FAULT(36)
- FAULT(36)
-END(single_step_trap)
-
- .org ia64_ivt+0x6100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6100 Entry 37 (size 16 bundles) Reserved
- DBG_FAULT(37)
- FAULT(37)
-
- .org ia64_ivt+0x6200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6200 Entry 38 (size 16 bundles) Reserved
- DBG_FAULT(38)
- FAULT(38)
-
- .org ia64_ivt+0x6300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6300 Entry 39 (size 16 bundles) Reserved
- DBG_FAULT(39)
- FAULT(39)
-
- .org ia64_ivt+0x6400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6400 Entry 40 (size 16 bundles) Reserved
- DBG_FAULT(40)
- FAULT(40)
-
- .org ia64_ivt+0x6500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6500 Entry 41 (size 16 bundles) Reserved
- DBG_FAULT(41)
- FAULT(41)
-
- .org ia64_ivt+0x6600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6600 Entry 42 (size 16 bundles) Reserved
- DBG_FAULT(42)
- FAULT(42)
-
- .org ia64_ivt+0x6700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6700 Entry 43 (size 16 bundles) Reserved
- DBG_FAULT(43)
- FAULT(43)
-
- .org ia64_ivt+0x6800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6800 Entry 44 (size 16 bundles) Reserved
- DBG_FAULT(44)
- FAULT(44)
-
- .org ia64_ivt+0x6900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception  (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-ENTRY(ia32_exception)
- DBG_FAULT(45)
- FAULT(45)
-END(ia32_exception)
-
- .org ia64_ivt+0x6a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-ENTRY(ia32_intercept)
- DBG_FAULT(46)
- FAULT(46)
-END(ia32_intercept)
-
- .org ia64_ivt+0x6b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
-ENTRY(ia32_interrupt)
- DBG_FAULT(47)
- FAULT(47)
-END(ia32_interrupt)
-
- .org ia64_ivt+0x6c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6c00 Entry 48 (size 16 bundles) Reserved
- DBG_FAULT(48)
- FAULT(48)
-
- .org ia64_ivt+0x6d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6d00 Entry 49 (size 16 bundles) Reserved
- DBG_FAULT(49)
- FAULT(49)
-
- .org ia64_ivt+0x6e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6e00 Entry 50 (size 16 bundles) Reserved
- DBG_FAULT(50)
- FAULT(50)
-
- .org ia64_ivt+0x6f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6f00 Entry 51 (size 16 bundles) Reserved
- DBG_FAULT(51)
- FAULT(51)
-
- .org ia64_ivt+0x7000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7000 Entry 52 (size 16 bundles) Reserved
- DBG_FAULT(52)
- FAULT(52)
-
- .org ia64_ivt+0x7100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7100 Entry 53 (size 16 bundles) Reserved
- DBG_FAULT(53)
- FAULT(53)
-
- .org ia64_ivt+0x7200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7200 Entry 54 (size 16 bundles) Reserved
- DBG_FAULT(54)
- FAULT(54)
-
- .org ia64_ivt+0x7300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7300 Entry 55 (size 16 bundles) Reserved
- DBG_FAULT(55)
- FAULT(55)
-
- .org ia64_ivt+0x7400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7400 Entry 56 (size 16 bundles) Reserved
- DBG_FAULT(56)
- FAULT(56)
-
- .org ia64_ivt+0x7500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7500 Entry 57 (size 16 bundles) Reserved
- DBG_FAULT(57)
- FAULT(57)
-
- .org ia64_ivt+0x7600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7600 Entry 58 (size 16 bundles) Reserved
- DBG_FAULT(58)
- FAULT(58)
-
- .org ia64_ivt+0x7700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7700 Entry 59 (size 16 bundles) Reserved
- DBG_FAULT(59)
- FAULT(59)
-
- .org ia64_ivt+0x7800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7800 Entry 60 (size 16 bundles) Reserved
- DBG_FAULT(60)
- FAULT(60)
-
- .org ia64_ivt+0x7900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7900 Entry 61 (size 16 bundles) Reserved
- DBG_FAULT(61)
- FAULT(61)
-
- .org ia64_ivt+0x7a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7a00 Entry 62 (size 16 bundles) Reserved
- DBG_FAULT(62)
- FAULT(62)
-
- .org ia64_ivt+0x7b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7b00 Entry 63 (size 16 bundles) Reserved
- DBG_FAULT(63)
- FAULT(63)
-
- .org ia64_ivt+0x7c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7c00 Entry 64 (size 16 bundles) Reserved
- DBG_FAULT(64)
- FAULT(64)
-
- .org ia64_ivt+0x7d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7d00 Entry 65 (size 16 bundles) Reserved
- DBG_FAULT(65)
- FAULT(65)
-
- .org ia64_ivt+0x7e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7e00 Entry 66 (size 16 bundles) Reserved
- DBG_FAULT(66)
- FAULT(66)
-
- .org ia64_ivt+0x7f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7f00 Entry 67 (size 16 bundles) Reserved
- DBG_FAULT(67)
- FAULT(67)
-
- //-----------------------------------------------------------------------------------
-	// call ia64_do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-ENTRY(page_fault)
- SSM_PSR_DT_AND_SRLZ_I
- ;;
- SAVE_MIN_WITH_COVER
- alloc r15=ar.pfs,0,0,3,0
- MOV_FROM_IFA(out0)
- MOV_FROM_ISR(out1)
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
- adds r3=8,r2 // set up second base pointer
- SSM_PSR_I(p15, p15, r14) // restore psr.i
- movl r14=ia64_leave_kernel
- ;;
- SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12 // out2 = pointer to pt_regs
- br.call.sptk.many b6=ia64_do_page_fault // ignore return address
-END(page_fault)
-
-ENTRY(non_syscall)
- mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
- ;;
- SAVE_MIN_WITH_COVER
-
- // There is no particular reason for this code to be here, other than that
- // there happens to be space here that would go unused otherwise. If this
-	// fault ever gets "unreserved", simply move the following code to a more
- // suitable spot...
-
- alloc r14=ar.pfs,0,0,2,0
- MOV_FROM_IIM(out0)
- add out1=16,sp
- adds r3=8,r2 // set up second base pointer for SAVE_REST
-
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
- // guarantee that interruption collection is on
- SSM_PSR_I(p15, p15, r15) // restore psr.i
- movl r15=ia64_leave_kernel
- ;;
- SAVE_REST
- mov rp=r15
- ;;
- br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
-END(non_syscall)
-
-ENTRY(__interrupt)
- DBG_FAULT(12)
- mov r31=pr // prepare to save predicates
- ;;
- SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
- // ensure everybody knows psr.ic is back on
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- SAVE_REST
- ;;
- MCA_RECOVER_RANGE(interrupt)
- alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
- MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
- add out1=16,sp // pass pointer to pt_regs as second arg
- ;;
- srlz.d // make sure we see the effect of cr.ivr
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_irq
-END(__interrupt)
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
-	 * fault ever gets "unreserved", simply move the following code to a more
- * suitable spot...
- */
-
-ENTRY(dispatch_unaligned_handler)
- SAVE_MIN_WITH_COVER
- ;;
- alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
- MOV_FROM_IFA(out0)
- adds out1=16,sp
-
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
- // guarantee that interruption collection is on
- SSM_PSR_I(p15, p15, r3) // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.sptk.many ia64_prepare_handle_unaligned
-END(dispatch_unaligned_handler)
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
-	 * fault ever gets "unreserved", simply move the following code to a more
- * suitable spot...
- */
-
-ENTRY(dispatch_to_fault_handler)
- /*
- * Input:
- * psr.ic: off
- * r19: fault vector number (e.g., 24 for General Exception)
- * r31: contains saved predicates (pr)
- */
- SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- MOV_FROM_ISR(out1)
- MOV_FROM_IFA(out2)
- MOV_FROM_IIM(out3)
- MOV_FROM_ITIR(out4)
- ;;
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
- // guarantee that interruption collection is on
- mov out0=r15
- ;;
- SSM_PSR_I(p15, p15, r3) // restore psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_fault
-END(dispatch_to_fault_handler)
-
- /*
- * Squatting in this space ...
- *
- * This special case dispatcher for illegal operation faults allows preserved
- * registers to be modified through a callback function (asm only) that is handed
- * back from the fault handler in r8. Up to three arguments can be passed to the
- * callback function by returning an aggregate with the callback as its first
- * element, followed by the arguments.
- */
-ENTRY(dispatch_illegal_op_fault)
- .prologue
- .body
- SAVE_MIN_WITH_COVER
- SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
- // guarantee that interruption collection is on
- ;;
- SSM_PSR_I(p15, p15, r3) // restore psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
- mov out0=ar.ec
- ;;
- SAVE_REST
- PT_REGS_UNWIND_INFO(0)
- ;;
- br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0: ;;
- alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
- mov out0=r9
- mov out1=r10
- mov out2=r11
- movl r15=ia64_leave_kernel
- ;;
- mov rp=r15
- mov b6=r8
- ;;
- cmp.ne p6,p0=0,r8
-(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
- br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
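
	// For reference, the aggregate-return convention described above can
	// be sketched in C. This is a minimal sketch, assuming the usual ia64
	// rule that a small aggregate is returned in r8-r11, which is what
	// lets the stub above pick the callback out of r8 and its arguments
	// out of r9-r11; the field names are illustrative:
	//
	//	struct illegal_op_return {
	//		unsigned long fkt;		/* asm-only callback, or 0 */
	//		unsigned long arg1, arg2, arg3;	/* up to three arguments   */
	//	};
	//
	// The C-level fault handler returns this aggregate: r8 = fkt and
	// r9..r11 = arg1..arg3, matching the mov out0=r9 ... mov b6=r8
	// sequence above.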
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
deleted file mode 100644
index ca34e51e84..0000000000
--- a/arch/ia64/kernel/kprobes.c
+++ /dev/null
@@ -1,911 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Kernel Probes (KProbes)
- * arch/ia64/kernel/kprobes.c
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- * <anil.s.keshavamurthy@intel.com> adapted from i386
- */
-
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/preempt.h>
-#include <linux/extable.h>
-#include <linux/kdebug.h>
-#include <linux/pgtable.h>
-
-#include <asm/sections.h>
-#include <asm/exception.h>
-
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
-
-enum instruction_type {A, I, M, F, B, L, X, u};
-static enum instruction_type bundle_encoding[32][3] = {
- [0x00] = { M, I, I },
- [0x01] = { M, I, I },
- [0x02] = { M, I, I },
- [0x03] = { M, I, I },
- [0x04] = { M, L, X },
- [0x05] = { M, L, X },
- [0x06] = { u, u, u },
- [0x07] = { u, u, u },
- [0x08] = { M, M, I },
- [0x09] = { M, M, I },
- [0x0A] = { M, M, I },
- [0x0B] = { M, M, I },
- [0x0C] = { M, F, I },
- [0x0D] = { M, F, I },
- [0x0E] = { M, M, F },
- [0x0F] = { M, M, F },
- [0x10] = { M, I, B },
- [0x11] = { M, I, B },
- [0x12] = { M, B, B },
- [0x13] = { M, B, B },
- [0x14] = { u, u, u },
- [0x15] = { u, u, u },
- [0x16] = { B, B, B },
- [0x17] = { B, B, B },
- [0x18] = { M, M, B },
- [0x19] = { M, M, B },
- [0x1A] = { u, u, u },
- [0x1B] = { u, u, u },
- [0x1C] = { M, F, B },
- [0x1D] = { M, F, B },
- [0x1E] = { u, u, u },
- [0x1F] = { u, u, u },
-};
-
-/* Insert a long-branch (brl) instruction bundle */
-static void __kprobes set_brl_inst(void *from, void *to)
-{
- s64 rel = ((s64) to - (s64) from) >> 4;
- bundle_t *brl;
- brl = (bundle_t *) ((u64) from & ~0xf);
- brl->quad0.template = 0x05; /* [MLX](stop) */
- brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */
- brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
- brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
- /* brl.cond.sptk.many.clr rel<<4 (qp=0) */
- brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
-}
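
The shifts above scatter a 60-bit signed, bundle-relative displacement (the byte offset shifted right by 4) across the three immediate fields of the brl encoding. A minimal sketch of the split, with hypothetical names:

	static void split_brl_disp(long rel, unsigned long *imm20b,
				   unsigned long *imm39, unsigned long *i)
	{
		*imm20b = rel & 0xfffff;		/* bits  0..19 -> slot 2 */
		*imm39  = (rel >> 20) & 0x7fffffffff;	/* bits 20..58 -> slot 1 */
		*i      = (rel >> 59) & 1;		/* sign bit    -> slot 2 */
	}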
-
-/*
- * Check whether the instruction is an IP-relative instruction
- * and update the kprobe inst flag accordingly.
- */
-static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
- uint major_opcode,
- unsigned long kprobe_inst,
- struct kprobe *p)
-{
- p->ainsn.inst_flag = 0;
- p->ainsn.target_br_reg = 0;
- p->ainsn.slot = slot;
-
- /* Check for Break instruction
- * Bits 37:40 Major opcode to be zero
- * Bits 27:32 X6 to be zero
-	 * Bits 33:35 X3 to be zero
- */
- if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
- /* is a break instruction */
- p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
- return;
- }
-
- if (bundle_encoding[template][slot] == B) {
- switch (major_opcode) {
- case INDIRECT_CALL_OPCODE:
- p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
- p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
- break;
- case IP_RELATIVE_PREDICT_OPCODE:
- case IP_RELATIVE_BRANCH_OPCODE:
- p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
- break;
- case IP_RELATIVE_CALL_OPCODE:
- p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
- p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
- p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
- break;
- }
- } else if (bundle_encoding[template][slot] == X) {
- switch (major_opcode) {
- case LONG_CALL_OPCODE:
- p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
- p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
- break;
- }
- }
- return;
-}
-
-/*
- * Check whether the instruction on which we are inserting the kprobe,
- *	(qp) cmpx.crel.ctype p1,p2=r2,r3
- * is a compare instruction with ctype equal to unc.
- */
-static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
- uint major_opcode,
- unsigned long kprobe_inst)
-{
- cmp_inst_t cmp_inst;
- uint ctype_unc = 0;
-
- if (!((bundle_encoding[template][slot] == I) ||
- (bundle_encoding[template][slot] == M)))
- goto out;
-
- if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
- (major_opcode == 0xE)))
- goto out;
-
- cmp_inst.l = kprobe_inst;
- if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
- /* Integer compare - Register Register (A6 type)*/
- if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
- &&(cmp_inst.f.c == 1))
- ctype_unc = 1;
- } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
- /* Integer compare - Immediate Register (A8 type)*/
- if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
- ctype_unc = 1;
- }
-out:
- return ctype_unc;
-}
-
-/*
- * Check whether the instruction on which we are inserting the kprobe
- * is supported.
- * Returns the qp value if supported,
- * or -EINVAL if unsupported.
- */
-static int __kprobes unsupported_inst(uint template, uint slot,
- uint major_opcode,
- unsigned long kprobe_inst,
- unsigned long addr)
-{
- int qp;
-
- qp = kprobe_inst & 0x3f;
- if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
- if (slot == 1 && qp) {
- printk(KERN_WARNING "Kprobes on cmp unc "
- "instruction on slot 1 at <0x%lx> "
- "is not supported\n", addr);
- return -EINVAL;
-
- }
- qp = 0;
- }
- else if (bundle_encoding[template][slot] == I) {
- if (major_opcode == 0) {
- /*
- * Check for Integer speculation instruction
- * - Bit 33-35 to be equal to 0x1
- */
- if (((kprobe_inst >> 33) & 0x7) == 1) {
- printk(KERN_WARNING
- "Kprobes on speculation inst at <0x%lx> not supported\n",
- addr);
- return -EINVAL;
- }
- /*
- * IP relative mov instruction
- * - Bit 27-35 to be equal to 0x30
- */
- if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
- printk(KERN_WARNING
- "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
- addr);
- return -EINVAL;
-
- }
- }
- else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
- (kprobe_inst & (0x1UL << 12))) {
- /* test bit instructions, tbit,tnat,tf
- * bit 33-36 to be equal to 0
- * bit 12 to be equal to 1
- */
- if (slot == 1 && qp) {
- printk(KERN_WARNING "Kprobes on test bit "
-				"instruction on slot 1 at <0x%lx> "
- "is not supported\n", addr);
- return -EINVAL;
- }
- qp = 0;
- }
- }
- else if (bundle_encoding[template][slot] == B) {
- if (major_opcode == 7) {
- /* IP-Relative Predict major code is 7 */
-			printk(KERN_WARNING "Kprobes on IP-Relative "
-					"Predict is not supported\n");
- return -EINVAL;
- }
- else if (major_opcode == 2) {
- /* Indirect Predict, major code is 2
- * bit 27-32 to be equal to 10 or 11
- */
- int x6=(kprobe_inst >> 27) & 0x3F;
- if ((x6 == 0x10) || (x6 == 0x11)) {
- printk(KERN_WARNING "Kprobes on "
- "Indirect Predict is not supported\n");
- return -EINVAL;
- }
- }
- }
-	/* The kernel does not use floating-point instructions; for safety,
-	 * check whether this is an fcmp/fclass/float-approximation instruction.
- */
- else if (unlikely(bundle_encoding[template][slot] == F)) {
- if ((major_opcode == 4 || major_opcode == 5) &&
- (kprobe_inst & (0x1 << 12))) {
- /* fcmp/fclass unc instruction */
- if (slot == 1 && qp) {
- printk(KERN_WARNING "Kprobes on fcmp/fclass "
-				"instruction on slot 1 at <0x%lx> "
- "is not supported\n", addr);
- return -EINVAL;
-
- }
- qp = 0;
- }
- if ((major_opcode == 0 || major_opcode == 1) &&
- (kprobe_inst & (0x1UL << 33))) {
- /* float Approximation instruction */
- if (slot == 1 && qp) {
- printk(KERN_WARNING "Kprobes on float Approx "
- "instr at <0x%lx> is not supported\n",
- addr);
- return -EINVAL;
- }
- qp = 0;
- }
- }
- return qp;
-}
-
-/*
- * Overwrite the bundle with the break instruction
- * at the given slot.
- */
-static void __kprobes prepare_break_inst(uint template, uint slot,
- uint major_opcode,
- unsigned long kprobe_inst,
- struct kprobe *p,
- int qp)
-{
- unsigned long break_inst = BREAK_INST;
- bundle_t *bundle = &p->opcode.bundle;
-
- /*
- * Copy the original kprobe_inst qualifying predicate(qp)
- * to the break instruction
- */
- break_inst |= qp;
-
- switch (slot) {
- case 0:
- bundle->quad0.slot0 = break_inst;
- break;
- case 1:
- bundle->quad0.slot1_p0 = break_inst;
- bundle->quad1.slot1_p1 = break_inst >> (64-46);
- break;
- case 2:
- bundle->quad1.slot2 = break_inst;
- break;
- }
-
- /*
- * Update the instruction flag, so that we can
- * emulate the instruction properly after we
- * single step on original instruction
- */
- update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
-}
-
-static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
- unsigned long *kprobe_inst, uint *major_opcode)
-{
- unsigned long kprobe_inst_p0, kprobe_inst_p1;
- unsigned int template;
-
- template = bundle->quad0.template;
-
- switch (slot) {
- case 0:
- *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
- *kprobe_inst = bundle->quad0.slot0;
- break;
- case 1:
- *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
- kprobe_inst_p0 = bundle->quad0.slot1_p0;
- kprobe_inst_p1 = bundle->quad1.slot1_p1;
- *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
- break;
- case 2:
- *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
- *kprobe_inst = bundle->quad1.slot2;
- break;
- }
-}
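
The recombination above follows from the IA-64 bundle layout: a 5-bit template followed by three 41-bit slots, with slot 1 straddling the two 64-bit halves -- its low 18 bits sit at the top of quad0 (slot1_p0) and its high 23 bits at the bottom of quad1 (slot1_p1), hence the shift by 64-46 = 18. A minimal sketch of the same extraction on two raw 64-bit words, names hypothetical:

	static unsigned long extract_slot(unsigned long q0, unsigned long q1, int slot)
	{
		const unsigned long mask41 = (1UL << 41) - 1;

		switch (slot) {
		case 0: return (q0 >> 5) & mask41;		    /* bits  5..45  */
		case 1: return ((q0 >> 46) | (q1 << 18)) & mask41;  /* bits 46..86  */
		case 2: return (q1 >> 23) & mask41;		    /* bits 87..127 */
		}
		return 0;
	}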
-
-/* Returns non-zero if the addr is in the Interrupt Vector Table */
-static int __kprobes in_ivt_functions(unsigned long addr)
-{
- return (addr >= (unsigned long)__start_ivt_text
- && addr < (unsigned long)__end_ivt_text);
-}
-
-static int __kprobes valid_kprobe_addr(int template, int slot,
- unsigned long addr)
-{
- if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
- printk(KERN_WARNING "Attempting to insert unaligned kprobe "
- "at 0x%lx\n", addr);
- return -EINVAL;
- }
-
- if (in_ivt_functions(addr)) {
- printk(KERN_WARNING "Kprobes can't be inserted inside "
- "IVT functions at 0x%lx\n", addr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- unsigned int i;
- i = atomic_add_return(1, &kcb->prev_kprobe_index);
- kcb->prev_kprobe[i-1].kp = kprobe_running();
- kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
-}
-
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- unsigned int i;
- i = atomic_read(&kcb->prev_kprobe_index);
- __this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
- kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
- atomic_sub(1, &kcb->prev_kprobe_index);
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p,
- struct kprobe_ctlblk *kcb)
-{
- __this_cpu_write(current_kprobe, p);
-}
-
-void __kretprobe_trampoline(void)
-{
-}
-
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-{
- regs->cr_iip = __kretprobe_trampoline_handler(regs, NULL);
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
-}
-
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *)regs->b0;
- ri->fp = NULL;
-
- /* Replace the return addr with trampoline addr */
- regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline);
-}
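
From a user's point of view, this b0 rewrite is what makes the ordinary kretprobe API work on ia64. A minimal sketch of a module using it; the probed symbol is a placeholder, not something this file prescribes:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		/* on ia64 the integer return value lives in r8 */
		pr_info("probed function returned %ld\n", (long)regs->r8);
		return 0;
	}

	static struct kretprobe my_kretprobe = {
		.handler	= ret_handler,
		.kp.symbol_name	= "kernel_clone",	/* placeholder target */
		.maxactive	= 20,
	};

	static int __init rp_init(void)
	{
		return register_kretprobe(&my_kretprobe);
	}

	static void __exit rp_exit(void)
	{
		unregister_kretprobe(&my_kretprobe);
	}

	module_init(rp_init);
	module_exit(rp_exit);
	MODULE_LICENSE("GPL");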
-
-/* Check whether the instruction in the slot is a break */
-static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
-{
- unsigned int major_opcode;
- unsigned int template = bundle->quad0.template;
- unsigned long kprobe_inst;
-
- /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
- if (slot == 1 && bundle_encoding[template][1] == L)
- slot++;
-
- /* Get Kprobe probe instruction at given slot*/
- get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
-
- /* For break instruction,
- * Bits 37:40 Major opcode to be zero
- * Bits 27:32 X6 to be zero
-	 * Bits 33:35 X3 to be zero
- */
- if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
- /* Not a break instruction */
- return 0;
- }
-
- /* Is a break instruction */
- return 1;
-}
-
-/*
- * Check whether the target bundle modifies the IP or triggers
- * an exception. If so, it is not boostable.
- */
-static int __kprobes can_boost(bundle_t *bundle, uint slot,
- unsigned long bundle_addr)
-{
- unsigned int template = bundle->quad0.template;
-
- do {
- if (search_exception_tables(bundle_addr + slot) ||
- __is_ia64_break_inst(bundle, slot))
- return 0; /* exception may occur in this bundle*/
- } while ((++slot) < 3);
- template &= 0x1e;
- if (template >= 0x10 /* including B unit */ ||
- template == 0x04 /* including X unit */ ||
- template == 0x06) /* undefined */
- return 0;
-
- return 1;
-}
-
-/* Prepare the long-jump bundle and disable other boosters if needed */
-static void __kprobes prepare_booster(struct kprobe *p)
-{
- unsigned long addr = (unsigned long)p->addr & ~0xFULL;
- unsigned int slot = (unsigned long)p->addr & 0xf;
- struct kprobe *other_kp;
-
- if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
- set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
- p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
- }
-
- /* disables boosters in previous slots */
- for (; addr < (unsigned long)p->addr; addr++) {
- other_kp = get_kprobe((void *)addr);
- if (other_kp)
- other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
- }
-}
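
The "boost" avoids the single-step trap entirely: when the copied bundle is safe, execution continues straight from the out-of-line copy back into kernel text. A sketch of the two-bundle out-of-line area this function assumes:

	/*
	 *	p->ainsn.insn[0]: copy of the probed bundle, executed out of line
	 *	p->ainsn.insn[1]: brl back to the bundle after the probe point,
	 *			  so the CPU falls through without raising a
	 *			  single-step fault
	 */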
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
- unsigned long addr = (unsigned long) p->addr;
- unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
- unsigned long kprobe_inst=0;
- unsigned int slot = addr & 0xf, template, major_opcode = 0;
- bundle_t *bundle;
- int qp;
-
- bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
- template = bundle->quad0.template;
-
-	if (valid_kprobe_addr(template, slot, addr))
- return -EINVAL;
-
- /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
- if (slot == 1 && bundle_encoding[template][1] == L)
- slot++;
-
- /* Get kprobe_inst and major_opcode from the bundle */
- get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
-
- qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
- if (qp < 0)
- return -EINVAL;
-
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
- return -ENOMEM;
- memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
- memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
-
- prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
-
- prepare_booster(p);
-
- return 0;
-}
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- unsigned long arm_addr;
- bundle_t *src, *dest;
-
- arm_addr = ((unsigned long)p->addr) & ~0xFUL;
- dest = &((kprobe_opcode_t *)arm_addr)->bundle;
- src = &p->opcode.bundle;
-
- flush_icache_range((unsigned long)p->ainsn.insn,
- (unsigned long)p->ainsn.insn +
- sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
-
- switch (p->ainsn.slot) {
- case 0:
- dest->quad0.slot0 = src->quad0.slot0;
- break;
- case 1:
- dest->quad1.slot1_p1 = src->quad1.slot1_p1;
- break;
- case 2:
- dest->quad1.slot2 = src->quad1.slot2;
- break;
- }
- flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
-{
- unsigned long arm_addr;
- bundle_t *src, *dest;
-
- arm_addr = ((unsigned long)p->addr) & ~0xFUL;
- dest = &((kprobe_opcode_t *)arm_addr)->bundle;
- /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
- src = &p->ainsn.insn->bundle;
- switch (p->ainsn.slot) {
- case 0:
- dest->quad0.slot0 = src->quad0.slot0;
- break;
- case 1:
- dest->quad1.slot1_p1 = src->quad1.slot1_p1;
- break;
- case 2:
- dest->quad1.slot2 = src->quad1.slot2;
- break;
- }
- flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
- if (p->ainsn.insn) {
- free_insn_slot(p->ainsn.insn,
- p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
- p->ainsn.insn = NULL;
- }
-}
-/*
- * We are resuming execution after a single-step fault, so the pt_regs
- * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
- * the IP to point back to the original instruction address, handling the
- * cases where the relative IP address and/or the branch register must be
- * fixed up.
- */
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
-{
- unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
- unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
- unsigned long template;
- int slot = ((unsigned long)p->addr & 0xf);
-
- template = p->ainsn.insn->bundle.quad0.template;
-
- if (slot == 1 && bundle_encoding[template][1] == L)
- slot = 2;
-
- if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
-
- if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
- /* Fix relative IP address */
- regs->cr_iip = (regs->cr_iip - bundle_addr) +
- resume_addr;
- }
-
- if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
- /*
- * Fix target branch register, software convention is
- * to use either b0 or b6 or b7, so just checking
- * only those registers
- */
- switch (p->ainsn.target_br_reg) {
- case 0:
- if ((regs->b0 == bundle_addr) ||
- (regs->b0 == bundle_addr + 0x10)) {
- regs->b0 = (regs->b0 - bundle_addr) +
- resume_addr;
- }
- break;
- case 6:
- if ((regs->b6 == bundle_addr) ||
- (regs->b6 == bundle_addr + 0x10)) {
- regs->b6 = (regs->b6 - bundle_addr) +
- resume_addr;
- }
- break;
- case 7:
- if ((regs->b7 == bundle_addr) ||
- (regs->b7 == bundle_addr + 0x10)) {
- regs->b7 = (regs->b7 - bundle_addr) +
- resume_addr;
- }
- break;
- } /* end switch */
- }
- goto turn_ss_off;
- }
-
- if (slot == 2) {
- if (regs->cr_iip == bundle_addr + 0x10) {
- regs->cr_iip = resume_addr + 0x10;
- }
- } else {
- if (regs->cr_iip == bundle_addr) {
- regs->cr_iip = resume_addr;
- }
- }
-
-turn_ss_off:
- /* Turn off Single Step bit */
- ia64_psr(regs)->ss = 0;
-}
-
-static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
-{
- unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
- unsigned long slot = (unsigned long)p->addr & 0xf;
-
- /* single step inline if break instruction */
- if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
- regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
- else
- regs->cr_iip = bundle_addr & ~0xFULL;
-
- if (slot > 2)
- slot = 0;
-
- ia64_psr(regs)->ri = slot;
-
- /* turn on single stepping */
- ia64_psr(regs)->ss = 1;
-}
-
-static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
-{
- unsigned int slot = ia64_psr(regs)->ri;
- unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
- bundle_t bundle;
-
- memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-
- return __is_ia64_break_inst(&bundle, slot);
-}
-
-static int __kprobes pre_kprobes_handler(struct die_args *args)
-{
- struct kprobe *p;
- int ret = 0;
- struct pt_regs *regs = args->regs;
- kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
- struct kprobe_ctlblk *kcb;
-
- /*
- * We don't want to be preempted for the entire
- * duration of kprobe processing
- */
- preempt_disable();
- kcb = get_kprobe_ctlblk();
-
- /* Handle recursion cases */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
- (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
- ia64_psr(regs)->ss = 0;
- goto no_kprobe;
- }
-			/* We have reentered pre_kprobes_handler(), since
-			 * another probe was hit while within the handler.
-			 * Here we save the original kprobe state and just
-			 * single-step the instruction of the new probe
-			 * without calling any user handlers.
-			 */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, kcb);
- kprobes_inc_nmissed_count(p);
- prepare_ss(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
- return 1;
- } else if (!is_ia64_break_inst(regs)) {
-			/* The breakpoint instruction was removed by
-			 * another cpu right after we hit it; no further
-			 * handling of this interrupt is appropriate.
-			 */
- ret = 1;
- goto no_kprobe;
- } else {
- /* Not our break */
- goto no_kprobe;
- }
- }
-
- p = get_kprobe(addr);
- if (!p) {
- if (!is_ia64_break_inst(regs)) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- */
- ret = 1;
-
- }
-
-		/* Not one of our breakpoints; let the kernel handle it */
- goto no_kprobe;
- }
-
- set_current_kprobe(p, kcb);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
- if (p->pre_handler && p->pre_handler(p, regs)) {
- reset_current_kprobe();
- preempt_enable_no_resched();
- return 1;
- }
-
-#if !defined(CONFIG_PREEMPTION)
- if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
- /* Boost up -- we can execute copied instructions directly */
- ia64_psr(regs)->ri = p->ainsn.slot;
- regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
- /* turn single stepping off */
- ia64_psr(regs)->ss = 0;
-
- reset_current_kprobe();
- preempt_enable_no_resched();
- return 1;
- }
-#endif
- prepare_ss(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
-
-no_kprobe:
- preempt_enable_no_resched();
- return ret;
-}
-
-static int __kprobes post_kprobes_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- resume_execution(cur, regs);
-
-	/* Restore the previously saved kprobe state and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-
-out:
- preempt_enable_no_resched();
- return 1;
-}
-
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-
- switch(kcb->kprobe_status) {
- case KPROBE_HIT_SS:
- case KPROBE_REENTER:
- /*
-		 * We are here because the instruction being single-
-		 * stepped caused a page fault. We reset the current
-		 * kprobe, point the instruction pointer back to the
-		 * probe address, and allow the page fault handler to
-		 * continue as for a normal page fault.
- */
- regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
- ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe(kcb);
- else
- reset_current_kprobe();
- preempt_enable_no_resched();
- break;
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
- if (ia64_done_with_exception(regs))
- return 1;
-
- /*
- * Let ia64_do_page_fault() fix it.
- */
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
- if (args->regs && user_mode(args->regs))
- return ret;
-
- switch(val) {
- case DIE_BREAK:
- /* err is break number from ia64_bad_break() */
- if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
- || args->err == 0)
- if (pre_kprobes_handler(args))
- ret = NOTIFY_STOP;
- break;
- case DIE_FAULT:
- /* err is vector number from ia64_fault() */
- if (args->err == 36)
- if (post_kprobes_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static struct kprobe trampoline_p = {
- .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
- trampoline_p.addr =
- dereference_function_descriptor(__kretprobe_trampoline);
- return register_kprobe(&trampoline_p);
-}
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
- if (p->addr ==
- dereference_function_descriptor(__kretprobe_trampoline))
- return 1;
-
- return 0;
-}
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
deleted file mode 100644
index 4db9ca144f..0000000000
--- a/arch/ia64/kernel/machine_kexec.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/ia64/kernel/machine_kexec.c
- *
- * Handle transition of Linux booting another kernel
- * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
- * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
- * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kexec.h>
-#include <linux/cpu.h>
-#include <linux/irq.h>
-#include <linux/efi.h>
-#include <linux/numa.h>
-#include <linux/mmzone.h>
-
-#include <asm/efi.h>
-#include <asm/numa.h>
-#include <asm/mmu_context.h>
-#include <asm/setup.h>
-#include <asm/delay.h>
-#include <asm/meminit.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-
-typedef void (*relocate_new_kernel_t)(
- unsigned long indirection_page,
- unsigned long start_address,
- struct ia64_boot_param *boot_param,
- unsigned long pal_addr) __noreturn;
-
-struct kimage *ia64_kimage;
-
-struct resource efi_memmap_res = {
- .name = "EFI Memory Map",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-struct resource boot_param_res = {
- .name = "Boot parameter",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-
-/*
- * Do whatever setup is needed on the image and the
- * reboot code buffer so that we can avoid allocations
- * later.
- */
-int machine_kexec_prepare(struct kimage *image)
-{
- void *control_code_buffer;
- const unsigned long *func;
-
- func = (unsigned long *)&relocate_new_kernel;
- /* Pre-load control code buffer to minimize work in kexec path */
- control_code_buffer = page_address(image->control_code_page);
- memcpy((void *)control_code_buffer, (const void *)func[0],
- relocate_new_kernel_size);
- flush_icache_range((unsigned long)control_code_buffer,
- (unsigned long)control_code_buffer + relocate_new_kernel_size);
- ia64_kimage = image;
-
- return 0;
-}
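
The func[0] dereference above works because an ia64 function pointer refers to a function descriptor rather than to the code itself. A minimal sketch of the layout (field names illustrative, following the shape of the kernel's struct fdesc):

	struct fdesc {
		unsigned long ip;	/* entry address of the code */
		unsigned long gp;	/* global pointer the callee expects */
	};

	/* so for any ia64 function f, ((unsigned long *)&f)[0] is the raw
	 * entry address -- exactly what gets memcpy'd into the control page */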
-
-void machine_kexec_cleanup(struct kimage *image)
-{
-}
-
-/*
- * Do not allocate memory (or fail in any way) in machine_kexec().
- * We are past the point of no return, committed to rebooting now.
- */
-static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
-{
- struct kimage *image = arg;
- relocate_new_kernel_t rnk;
- void *pal_addr = efi_get_pal_addr();
- unsigned long code_addr;
- int ii;
- u64 fp, gp;
- ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;
-
- BUG_ON(!image);
- code_addr = (unsigned long)page_address(image->control_code_page);
- if (image->type == KEXEC_TYPE_CRASH) {
- crash_save_this_cpu();
- current->thread.ksp = (__u64)info->sw - 16;
-
- /* Register noop init handler */
- fp = ia64_tpa(init_handler->fp);
- gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
- ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
- } else {
- /* Unregister init handlers of current kernel */
- ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
- }
-
- /* Unregister mca handler - No more recovery on current kernel */
- ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);
-
- /* Interrupts aren't acceptable while we reboot */
- local_irq_disable();
-
- /* Mask CMC and Performance Monitor interrupts */
- ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
- ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
-
- /* Mask ITV and Local Redirect Registers */
- ia64_set_itv(1 << 16);
- ia64_set_lrr0(1 << 16);
- ia64_set_lrr1(1 << 16);
-
- /* terminate possible nested in-service interrupts */
- for (ii = 0; ii < 16; ii++)
- ia64_eoi();
-
- /* unmask TPR and clear any pending interrupts */
- ia64_setreg(_IA64_REG_CR_TPR, 0);
- ia64_srlz_d();
- while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
- ia64_eoi();
- rnk = (relocate_new_kernel_t)&code_addr;
- (*rnk)(image->head, image->start, ia64_boot_param,
- GRANULEROUNDDOWN((unsigned long) pal_addr));
- BUG();
-}
-
-void machine_kexec(struct kimage *image)
-{
- BUG_ON(!image);
- unw_init_running(ia64_machine_kexec, image);
- for(;;);
-}
-
-void arch_crash_save_vmcoreinfo(void)
-{
-#if defined(CONFIG_SPARSEMEM)
- VMCOREINFO_SYMBOL(pgdat_list);
- VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
-#endif
-#ifdef CONFIG_NUMA
- VMCOREINFO_SYMBOL(node_memblk);
- VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS);
- VMCOREINFO_STRUCT_SIZE(node_memblk_s);
- VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
- VMCOREINFO_OFFSET(node_memblk_s, size);
-#endif
-#if CONFIG_PGTABLE_LEVELS == 3
- VMCOREINFO_CONFIG(PGTABLE_3);
-#elif CONFIG_PGTABLE_LEVELS == 4
- VMCOREINFO_CONFIG(PGTABLE_4);
-#endif
-}
-
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
deleted file mode 100644
index 2671688d34..0000000000
--- a/arch/ia64/kernel/mca.c
+++ /dev/null
@@ -1,2111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * File: mca.c
- * Purpose: Generic MCA handling layer
- *
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Copyright (C) 2002 Dell Inc.
- * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
- *
- * Copyright (C) 2002 Intel
- * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
- *
- * Copyright (C) 2001 Intel
- * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
- *
- * Copyright (C) 2000 Intel
- * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
- *
- * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
- *
- * Copyright (C) 2006 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- *
- * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
- * Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
- * added min save state dump, added INIT handler.
- *
- * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
- * Added setup of CMCI and CPEI IRQs, logging of corrected platform
- * errors, completed code for logging of corrected & uncorrected
- * machine check errors, and updated for conformance with Nov. 2000
- * revision of the SAL 3.0 spec.
- *
- * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
- * Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
- * set SAL default return values, changed error record structure to
- * linked list, added init call to sal_get_state_info_size().
- *
- * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
- * GUID cleanups.
- *
- * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
- * Added INIT backtrace support.
- *
- * 2003-12-08 Keith Owens <kaos@sgi.com>
- * smp_call_function() must not be called from interrupt context
- * (can deadlock on tasklist_lock).
- * Use keventd to call smp_call_function().
- *
- * 2004-02-01 Keith Owens <kaos@sgi.com>
- * Avoid deadlock when using printk() for MCA and INIT records.
- * Delete all record printing code, moved to salinfo_decode in user
- * space. Mark variables and functions static where possible.
- * Delete dead variables and functions. Reorder to remove the need
- * for forward declarations and to consolidate related code.
- *
- * 2005-08-12 Keith Owens <kaos@sgi.com>
- * Convert MCA/INIT handlers to use per event stacks and SAL/OS
- * state.
- *
- * 2005-10-07 Keith Owens <kaos@sgi.com>
- * Add notify_die() hooks.
- *
- * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- * Add printing support for MCA/INIT.
- *
- * 2007-04-27 Russ Anderson <rja@sgi.com>
- * Support multiple cpus going through OS_MCA in the same event.
- */
-#include <linux/jiffies.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/memblock.h>
-#include <linux/acpi.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/workqueue.h>
-#include <linux/cpumask.h>
-#include <linux/kdebug.h>
-#include <linux/cpu.h>
-#include <linux/gfp.h>
-
-#include <asm/delay.h>
-#include <asm/efi.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-#include <asm/mca_asm.h>
-#include <asm/kexec.h>
-
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-#include <asm/tlb.h>
-
-#include "mca_drv.h"
-#include "entry.h"
-#include "irq.h"
-
-#if defined(IA64_MCA_DEBUG_INFO)
-# define IA64_MCA_DEBUG(fmt...) printk(fmt)
-#else
-# define IA64_MCA_DEBUG(fmt...) do {} while (0)
-#endif
-
-#define NOTIFY_INIT(event, regs, arg, spin) \
-do { \
- if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \
- == NOTIFY_STOP) && ((spin) == 1)) \
- ia64_mca_spin(__func__); \
-} while (0)
-
-#define NOTIFY_MCA(event, regs, arg, spin) \
-do { \
- if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \
- == NOTIFY_STOP) && ((spin) == 1)) \
- ia64_mca_spin(__func__); \
-} while (0)
-
-/* Used by mca_asm.S */
-DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
-DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
-DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
-DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
-DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */
-
-unsigned long __per_cpu_mca[NR_CPUS];
-
-/* In mca_asm.S */
-extern void ia64_os_init_dispatch_monarch (void);
-extern void ia64_os_init_dispatch_slave (void);
-
-static int monarch_cpu = -1;
-
-static ia64_mc_info_t ia64_mc_info;
-
-#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
-#define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
-#define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
-#define CPE_HISTORY_LENGTH 5
-#define CMC_HISTORY_LENGTH 5
-
-static struct timer_list cpe_poll_timer;
-static struct timer_list cmc_poll_timer;
-/*
- * This variable tells whether we are currently in polling mode.
- * Start with this in the wrong state so we won't play w/ timers
- * before the system is ready.
- */
-static int cmc_polling_enabled = 1;
-
-/*
- * Clearing this variable prevents CPE polling from getting activated
- * in mca_late_init. Use it if your system doesn't provide a CPEI,
- * but encounters problems retrieving CPE logs. This should only be
- * necessary for debugging.
- */
-static int cpe_poll_enabled = 1;
-
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-
-static int mca_init __initdata;
-
-/*
- * limited & delayed printing support for MCA/INIT handler
- */
-
-#define mprintk(fmt...) ia64_mca_printk(fmt)
-
-#define MLOGBUF_SIZE (512+256*NR_CPUS)
-#define MLOGBUF_MSGMAX 256
-static char mlogbuf[MLOGBUF_SIZE];
-static DEFINE_SPINLOCK(mlogbuf_wlock); /* mca context only */
-static DEFINE_SPINLOCK(mlogbuf_rlock); /* normal context only */
-static unsigned long mlogbuf_start;
-static unsigned long mlogbuf_end;
-static unsigned int mlogbuf_finished = 0;
-static unsigned long mlogbuf_timestamp = 0;
-
-static int loglevel_save = -1;
-#define BREAK_LOGLEVEL(__console_loglevel) \
- oops_in_progress = 1; \
- if (loglevel_save < 0) \
- loglevel_save = __console_loglevel; \
- __console_loglevel = 15;
-
-#define RESTORE_LOGLEVEL(__console_loglevel) \
- if (loglevel_save >= 0) { \
- __console_loglevel = loglevel_save; \
- loglevel_save = -1; \
- } \
- mlogbuf_finished = 0; \
- oops_in_progress = 0;
-
-/*
- * Push messages into buffer, print them later if not urgent.
- */
-void ia64_mca_printk(const char *fmt, ...)
-{
- va_list args;
- int printed_len;
- char temp_buf[MLOGBUF_MSGMAX];
- char *p;
-
- va_start(args, fmt);
- printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
- va_end(args);
-
- /* Copy the output into mlogbuf */
- if (oops_in_progress) {
- /* mlogbuf was abandoned, use printk directly instead. */
- printk("%s", temp_buf);
- } else {
- spin_lock(&mlogbuf_wlock);
- for (p = temp_buf; *p; p++) {
- unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
- if (next != mlogbuf_start) {
- mlogbuf[mlogbuf_end] = *p;
- mlogbuf_end = next;
- } else {
- /* buffer full */
- break;
- }
- }
- mlogbuf[mlogbuf_end] = '\0';
- spin_unlock(&mlogbuf_wlock);
- }
-}
-EXPORT_SYMBOL(ia64_mca_printk);
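
mlogbuf is a classic one-slot-free ring buffer: one byte is always kept unused so that start == end means "empty" and (end + 1) % SIZE == start means "full". A minimal user-space sketch of the same structure, names hypothetical:

	#define RB_SIZE 16
	static char rb[RB_SIZE];
	static unsigned long rb_start, rb_end;	/* read and write cursors */

	static int rb_put(char c)
	{
		unsigned long next = (rb_end + 1) % RB_SIZE;

		if (next == rb_start)
			return -1;		/* full: drop, as mlogbuf does */
		rb[rb_end] = c;
		rb_end = next;
		return 0;
	}

	static int rb_get(void)
	{
		int c;

		if (rb_start == rb_end)
			return -1;		/* empty */
		c = rb[rb_start];
		rb_start = (rb_start + 1) % RB_SIZE;
		return c;
	}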
-
-/*
- * Print buffered messages.
- * NOTE: call this after returning to normal context (e.g., from salinfod).
- */
-void ia64_mlogbuf_dump(void)
-{
- char temp_buf[MLOGBUF_MSGMAX];
- char *p;
- unsigned long index;
- unsigned long flags;
- unsigned int printed_len;
-
- /* Get output from mlogbuf */
- while (mlogbuf_start != mlogbuf_end) {
- temp_buf[0] = '\0';
- p = temp_buf;
- printed_len = 0;
-
- spin_lock_irqsave(&mlogbuf_rlock, flags);
-
- index = mlogbuf_start;
- while (index != mlogbuf_end) {
- *p = mlogbuf[index];
- index = (index + 1) % MLOGBUF_SIZE;
- if (!*p)
- break;
- p++;
- if (++printed_len >= MLOGBUF_MSGMAX - 1)
- break;
- }
- *p = '\0';
- if (temp_buf[0])
- printk("%s", temp_buf);
- mlogbuf_start = index;
-
- mlogbuf_timestamp = 0;
- spin_unlock_irqrestore(&mlogbuf_rlock, flags);
- }
-}
-EXPORT_SYMBOL(ia64_mlogbuf_dump);
-
-/*
- * Call this if the system is going down or if messages must be flushed to
- * the console immediately (e.g., recovery has failed, a crash dump is about
- * to be invoked, a long-wait rendezvous, etc.).
- * NOTE: this should be called from the monarch.
- */
-static void ia64_mlogbuf_finish(int wait)
-{
- BREAK_LOGLEVEL(console_loglevel);
-
- ia64_mlogbuf_dump();
- printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
- "MCA/INIT might be dodgy or fail.\n");
-
- if (!wait)
- return;
-
- /* wait for console */
- printk("Delaying for 5 seconds...\n");
- udelay(5*1000000);
-
- mlogbuf_finished = 1;
-}
-
-/*
- * Print buffered messages from INIT context.
- */
-static void ia64_mlogbuf_dump_from_init(void)
-{
- if (mlogbuf_finished)
- return;
-
- if (mlogbuf_timestamp &&
- time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
- printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
-			"and the system seems to be messed up.\n");
- ia64_mlogbuf_finish(0);
- return;
- }
-
- if (!spin_trylock(&mlogbuf_rlock)) {
- printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
- "Generated messages other than stack dump will be "
- "buffered to mlogbuf and will be printed later.\n");
-		printk(KERN_ERR "INIT: If messages are not printed after "
-			"this INIT, wait 30 seconds and assert INIT again.\n");
- if (!mlogbuf_timestamp)
- mlogbuf_timestamp = jiffies;
- return;
- }
- spin_unlock(&mlogbuf_rlock);
- ia64_mlogbuf_dump();
-}
-
-static inline void
-ia64_mca_spin(const char *func)
-{
- if (monarch_cpu == smp_processor_id())
- ia64_mlogbuf_finish(0);
- mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
- while (1)
- cpu_relax();
-}
-/*
- * IA64_MCA log support
- */
-#define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */
-#define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */
-
-typedef struct ia64_state_log_s
-{
- spinlock_t isl_lock;
- int isl_index;
- unsigned long isl_count;
- ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
-} ia64_state_log_t;
-
-static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-
-#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
-#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
-#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
-#define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index
-#define IA64_LOG_CURR_INDEX(it)    (1 - ia64_state_log[it].isl_index)
-#define IA64_LOG_INDEX_INC(it) \
- {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
- ia64_state_log[it].isl_count++;}
-#define IA64_LOG_INDEX_DEC(it) \
- ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
-#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
-#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
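
The index arithmetic above is a two-deep double buffer: "next" is the buffer the handler fills, "current" is the last completed record, and the two swap by flipping a single 0/1 index, which is why nested MCAs need IA64_MAX_LOGS == 2. A minimal sketch, names hypothetical:

	static int isl_index;			/* next buffer to fill */
	#define CURR_INDEX (1 - isl_index)	/* last completed buffer */

	static inline void log_flip(void)
	{
		isl_index = 1 - isl_index;	/* the flip half of IA64_LOG_INDEX_INC */
	}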
-
-static inline void ia64_log_allocate(int it, u64 size)
-{
- ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
- if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
- panic("%s: Failed to allocate %llu bytes\n", __func__, size);
-
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
- if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
- panic("%s: Failed to allocate %llu bytes\n", __func__, size);
-}
-
-/*
- * ia64_log_init
- * Reset the OS ia64 log buffer
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs : None
- */
-static void __init
-ia64_log_init(int sal_info_type)
-{
- u64 max_size = 0;
-
- IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
- IA64_LOG_LOCK_INIT(sal_info_type);
-
- // SAL will tell us the maximum size of any error record of this type
- max_size = ia64_sal_get_state_info_size(sal_info_type);
- if (!max_size)
-		/* memblock_alloc() doesn't like zero-sized allocations! */
- return;
-
- // set up OS data structures to hold error info
- ia64_log_allocate(sal_info_type, max_size);
-}
-
-/*
- * ia64_log_get
- *
- * Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * irq_safe whether you can use printk at this point
- * Outputs : size (total record length)
- * *buffer (ptr to error record)
- *
- */
-static u64
-ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
-{
- sal_log_record_header_t *log_buffer;
- u64 total_len = 0;
- unsigned long s;
-
- IA64_LOG_LOCK(sal_info_type);
-
- /* Get the process state information */
- log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
- total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
- if (total_len) {
- IA64_LOG_INDEX_INC(sal_info_type);
- IA64_LOG_UNLOCK(sal_info_type);
- if (irq_safe) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
- __func__, sal_info_type, total_len);
- }
- *buffer = (u8 *) log_buffer;
- return total_len;
- } else {
- IA64_LOG_UNLOCK(sal_info_type);
- return 0;
- }
-}
-
-/*
- * ia64_mca_log_sal_error_record
- *
- * This function retrieves a specified error record type from SAL
- * and wakes up any processes waiting for error records.
- *
- * Inputs : sal_info_type (Type of error record MCA/CMC/CPE)
- * FIXME: remove MCA and irq_safe.
- */
-static void
-ia64_mca_log_sal_error_record(int sal_info_type)
-{
- u8 *buffer;
- sal_log_record_header_t *rh;
- u64 size;
- int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
-#ifdef IA64_MCA_DEBUG_INFO
- static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
-#endif
-
- size = ia64_log_get(sal_info_type, &buffer, irq_safe);
- if (!size)
- return;
-
- salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
-
- if (irq_safe)
- IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
- smp_processor_id(),
- sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
-
- /* Clear logs from corrected errors in case there's no user-level logger */
- rh = (sal_log_record_header_t *)buffer;
- if (rh->severity == sal_log_severity_corrected)
- ia64_sal_clear_state_info(sal_info_type);
-}
-
-/*
- * search_mca_table
- * See if the MCA surfaced in an instruction range
- * that has been tagged as recoverable.
- *
- * Inputs
- * first First address range to check
- * last Last address range to check
- * ip Instruction pointer, address we are looking for
- *
- * Return value:
- * 1 on Success (in the table)/ 0 on Failure (not in the table)
- */
-int
-search_mca_table (const struct mca_table_entry *first,
- const struct mca_table_entry *last,
- unsigned long ip)
-{
- const struct mca_table_entry *curr;
- u64 curr_start, curr_end;
-
- curr = first;
- while (curr <= last) {
- curr_start = (u64) &curr->start_addr + curr->start_addr;
- curr_end = (u64) &curr->end_addr + curr->end_addr;
-
- if ((ip >= curr_start) && (ip <= curr_end)) {
- return 1;
- }
- curr++;
- }
- return 0;
-}
-
-/* Given an address, look for it in the mca tables. */
-int mca_recover_range(unsigned long addr)
-{
- extern struct mca_table_entry __start___mca_table[];
- extern struct mca_table_entry __stop___mca_table[];
-
- return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
-}
-EXPORT_SYMBOL_GPL(mca_recover_range);
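
The (u64)&curr->start_addr + curr->start_addr arithmetic above works because each table field stores a self-relative offset: the signed distance from the field's own address to the target. That keeps the table position-independent and free of load-time relocations. A minimal sketch, names hypothetical:

	struct rel_entry {
		int start_off;	/* target = (unsigned long)&start_off + start_off */
		int end_off;
	};

	static unsigned long rel_to_abs(const int *field)
	{
		return (unsigned long)field + *field;
	}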
-
-int cpe_vector = -1;
-int ia64_cpe_irq = -1;
-
-static irqreturn_t
-ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
-{
- static unsigned long cpe_history[CPE_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cpe_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __func__, cpe_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
- spin_lock(&cpe_history_lock);
- if (!cpe_poll_enabled && cpe_vector >= 0) {
-
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
- if (now - cpe_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
- if (count >= CPE_HISTORY_LENGTH) {
-
- cpe_poll_enabled = 1;
- spin_unlock(&cpe_history_lock);
- disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
-
- mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
-
- /* lock already released, get out now */
- goto out;
- } else {
- cpe_history[index++] = now;
- if (index == CPE_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cpe_history_lock);
-out:
- /* Get the CPE error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
- local_irq_disable();
-
- return IRQ_HANDLED;
-}
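
The history check above is a small sliding-window rate limiter: keep the arrival times of the last CPE_HISTORY_LENGTH interrupts and trip to polling mode when that many arrive within one second. A minimal sketch of the same logic, names hypothetical:

	#define HIST 5
	static unsigned long hist[HIST];
	static int hist_idx;

	/* returns 1 when the caller should switch to polling */
	static int cpe_storm(unsigned long now, unsigned long hz)
	{
		int i, count = 1;	/* the event that just arrived */

		for (i = 0; i < HIST; i++)
			if (now - hist[i] <= hz)
				count++;
		if (count >= HIST)
			return 1;
		hist[hist_idx++] = now;
		if (hist_idx == HIST)
			hist_idx = 0;
		return 0;
	}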
-
-/*
- * ia64_mca_register_cpev
- *
- * Register the corrected platform error vector with SAL.
- *
- * Inputs
- * cpev Corrected Platform Error Vector number
- *
- * Outputs
- * None
- */
-void
-ia64_mca_register_cpev (int cpev)
-{
- /* Register the CPE interrupt vector with SAL */
- struct ia64_sal_retval isrv;
-
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
- if (isrv.status) {
- printk(KERN_ERR "Failed to register Corrected Platform "
- "Error interrupt vector with SAL (status %ld)\n", isrv.status);
- return;
- }
-
- IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __func__, cpev);
-}
-
-/*
- * ia64_mca_cmc_vector_setup
- *
- * Set up the corrected machine check vector register in the processor.
- * (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * None
- *
- * Outputs
- * None
- */
-void
-ia64_mca_cmc_vector_setup (void)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
- cmcv.cmcv_vector = IA64_CMC_VECTOR;
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
- __func__, smp_processor_id(), IA64_CMC_VECTOR);
-
- IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
- __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
-}
-
-/*
- * ia64_mca_cmc_vector_disable
- *
- * Mask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_disable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
- __func__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-/*
- * ia64_mca_cmc_vector_enable
- *
- * Unmask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_enable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
- __func__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-/*
- * ia64_mca_cmc_vector_disable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * disable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
-}
-
-/*
- * ia64_mca_cmc_vector_enable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * enable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
-}
-
-/*
- * ia64_mca_wakeup
- *
- * Send an inter-cpu interrupt to wake up a particular cpu.
- *
- * Inputs : cpuid
- * Outputs : None
- */
-static void
-ia64_mca_wakeup(int cpu)
-{
- ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * ia64_mca_wakeup_all
- *
- * Wake up all the slave cpus that have previously rendezvoused.
- *
- * Inputs : None
- * Outputs : None
- */
-static void
-ia64_mca_wakeup_all(void)
-{
- int cpu;
-
- /* Clear the Rendez checkin flag for all cpus */
- for_each_online_cpu(cpu) {
- if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
- ia64_mca_wakeup(cpu);
- }
-
-}
-
-/*
- * ia64_mca_rendez_interrupt_handler
- *
- * This is the handler used to put slave processors into a spin loop
- * while the monarch processor does the MCA handling, and later to
- * wake each slave up once the monarch is done. The state
- * IA64_MCA_RENDEZ_CHECKIN_DONE indicates that the cpu has rendezvoused
- * in SAL. The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates that
- * the cpu has come out of OS rendezvous.
- *
- * Inputs : None
- * Outputs : None
- */
-static irqreturn_t
-ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
-{
- unsigned long flags;
- int cpu = smp_processor_id();
- struct ia64_mca_notify_die nd =
- { .sos = NULL, .monarch_cpu = &monarch_cpu };
-
- /* Mask all interrupts */
- local_irq_save(flags);
-
- NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
-
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
- /* Register with the SAL monarch that the slave has
- * reached SAL
- */
- ia64_sal_mc_rendez();
-
- NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
-
- /* Wait for the monarch cpu to exit. */
- while (monarch_cpu != -1)
- cpu_relax(); /* spin until monarch leaves */
-
- NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
-
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
- /* Enable all interrupts */
- local_irq_restore(flags);
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_mca_wakeup_int_handler
- *
- * The interrupt handler for processing the inter-cpu interrupt to the
- * slave cpu which was spinning in the rendez loop.
- * Since this spinning is done by turning off the interrupts and
- * polling on the wakeup-interrupt bit in the IRR, there is
- * nothing useful to be done in the handler.
- *
- * Inputs : wakeup_irq (Wakeup-interrupt bit)
- * arg (Interrupt handler specific argument)
- * Outputs : None
- *
- */
-static irqreturn_t
-ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
-{
- return IRQ_HANDLED;
-}
-
-/* Function pointer for extra MCA recovery */
-int (*ia64_mca_ucmc_extension)
- (void*,struct ia64_sal_os_state*)
- = NULL;
-
-int
-ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
-{
- if (ia64_mca_ucmc_extension)
- return 1;
-
- ia64_mca_ucmc_extension = fn;
- return 0;
-}
-
-void
-ia64_unreg_MCA_extension(void)
-{
- if (ia64_mca_ucmc_extension)
- ia64_mca_ucmc_extension = NULL;
-}
-
-EXPORT_SYMBOL(ia64_reg_MCA_extension);
-EXPORT_SYMBOL(ia64_unreg_MCA_extension);
-
-
-static inline void
-copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
-{
- u64 fslot, tslot, nat;
- *tr = *fr;
- fslot = ((unsigned long)fr >> 3) & 63;
- tslot = ((unsigned long)tr >> 3) & 63;
- *tnat &= ~(1UL << tslot);
- nat = (fnat >> fslot) & 1;
- *tnat |= (nat << tslot);
-}
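
copy_reg() moves a register value together with its NaT (not-a-thing) bit: the NaT bit for a spilled 8-byte slot lives at bit ((address >> 3) & 63) of the collection word, i.e. at the slot's position within its 64-word span. A minimal standalone sketch of the same bookkeeping:

	static void move_nat(unsigned long from_addr, unsigned long fnat,
			     unsigned long to_addr, unsigned long *tnat)
	{
		unsigned int fslot = (from_addr >> 3) & 63;
		unsigned int tslot = (to_addr >> 3) & 63;
		unsigned long bit = (fnat >> fslot) & 1;

		*tnat = (*tnat & ~(1UL << tslot)) | (bit << tslot);
	}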
-
-/* Change the comm field on the MCA/INIT task to include the pid that
- * was interrupted; it makes for easier debugging. If that pid was 0
- * (swapper or nested MCA/INIT) then use the start of the previous comm
- * field suffixed with its cpu.
- */
-
-static void
-ia64_mca_modify_comm(const struct task_struct *previous_current)
-{
- char *p, comm[sizeof(current->comm)];
- if (previous_current->pid)
- snprintf(comm, sizeof(comm), "%s %d",
- current->comm, previous_current->pid);
- else {
- int l;
- if ((p = strchr(previous_current->comm, ' ')))
- l = p - previous_current->comm;
- else
- l = strlen(previous_current->comm);
- snprintf(comm, sizeof(comm), "%s %*s %d",
- current->comm, l, previous_current->comm,
- task_thread_info(previous_current)->cpu);
- }
- memcpy(current->comm, comm, sizeof(current->comm));
-}
-
-static void
-finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
- unsigned long *nat)
-{
- const struct pal_min_state_area *ms = sos->pal_min_state;
- const u64 *bank;
-
- /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
- * pmsa_{xip,xpsr,xfs}
- */
- if (ia64_psr(regs)->ic) {
- regs->cr_iip = ms->pmsa_iip;
- regs->cr_ipsr = ms->pmsa_ipsr;
- regs->cr_ifs = ms->pmsa_ifs;
- } else {
- regs->cr_iip = ms->pmsa_xip;
- regs->cr_ipsr = ms->pmsa_xpsr;
- regs->cr_ifs = ms->pmsa_xfs;
-
- sos->iip = ms->pmsa_iip;
- sos->ipsr = ms->pmsa_ipsr;
- sos->ifs = ms->pmsa_ifs;
- }
- regs->pr = ms->pmsa_pr;
- regs->b0 = ms->pmsa_br0;
- regs->ar_rsc = ms->pmsa_rsc;
- copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
- copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
- copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
- copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
- copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
- copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
- copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
- copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
- copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
- copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
- copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
- if (ia64_psr(regs)->bn)
- bank = ms->pmsa_bank1_gr;
- else
- bank = ms->pmsa_bank0_gr;
- copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
- copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
- copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
- copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
- copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
- copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
- copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
- copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
- copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
- copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
- copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
- copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
- copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
- copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
- copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
- copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
-}
-
-/* On entry to this routine, we are running on the per cpu stack, see
- * mca_asm.h. The original stack has not been touched by this event. Some of
- * the original stack's registers will be in the RBS on this stack. This stack
- * also contains a partial pt_regs and switch_stack, the rest of the data is in
- * PAL minstate.
- *
- * The first thing to do is modify the original stack to look like a blocked
- * task so we can run backtrace on the original task. Also mark the per cpu
- * stack as current to ensure that we use the correct task state; it also means
- * that we can backtrace the MCA/INIT handler code itself.
- */
-
-static struct task_struct *
-ia64_mca_modify_original_stack(struct pt_regs *regs,
- const struct switch_stack *sw,
- struct ia64_sal_os_state *sos,
- const char *type)
-{
- char *p;
- ia64_va va;
- extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
- const struct pal_min_state_area *ms = sos->pal_min_state;
- struct task_struct *previous_current;
- struct pt_regs *old_regs;
- struct switch_stack *old_sw;
- unsigned size = sizeof(struct pt_regs) +
- sizeof(struct switch_stack) + 16;
- unsigned long *old_bspstore, *old_bsp;
- unsigned long *new_bspstore, *new_bsp;
- unsigned long old_unat, old_rnat, new_rnat, nat;
- u64 slots, loadrs = regs->loadrs;
- u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
- u64 ar_bspstore = regs->ar_bspstore;
- u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
- const char *msg;
- int cpu = smp_processor_id();
-
- previous_current = curr_task(cpu);
- ia64_set_curr_task(cpu, current);
- if ((p = strchr(current->comm, ' ')))
- *p = '\0';
-
- /* Best effort attempt to cope with MCA/INIT delivered while in
- * physical mode.
- */
- regs->cr_ipsr = ms->pmsa_ipsr;
- if (ia64_psr(regs)->dt == 0) {
- va.l = r12;
- if (va.f.reg == 0) {
- va.f.reg = 7;
- r12 = va.l;
- }
- va.l = r13;
- if (va.f.reg == 0) {
- va.f.reg = 7;
- r13 = va.l;
- }
- }
- if (ia64_psr(regs)->rt == 0) {
- va.l = ar_bspstore;
- if (va.f.reg == 0) {
- va.f.reg = 7;
- ar_bspstore = va.l;
- }
- va.l = ar_bsp;
- if (va.f.reg == 0) {
- va.f.reg = 7;
- ar_bsp = va.l;
- }
- }
-
- /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
- * have been copied to the old stack, the old stack may fail the
- * validation tests below. So ia64_old_stack() must restore the dirty
- * registers from the new stack. The old and new bspstore probably
- * have different alignments, so loadrs calculated on the old bsp
- * cannot be used to restore from the new bsp. Calculate a suitable
- * loadrs for the new stack and save it in the new pt_regs, where
- * ia64_old_stack() can get it.
- */
- old_bspstore = (unsigned long *)ar_bspstore;
- old_bsp = (unsigned long *)ar_bsp;
- slots = ia64_rse_num_regs(old_bspstore, old_bsp);
- new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
- new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
- regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
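-	/* Worked example (editorial sketch): if the interval held 62 dirty
-	 * registers and the new RBS placement crosses one RNAT collection
-	 * slot, then new_bsp - new_bspstore = 63 slots = 504 bytes, so
-	 * loadrs = 504 << 16, the byte count placed in the loadrs field
-	 * (bits 16..29) of ar.rsc.
-	 */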
-
- /* Verify the previous stack state before we change it */
- if (user_mode(regs)) {
- msg = "occurred in user space";
- /* previous_current is guaranteed to be valid when the task was
- * in user space, so ...
- */
- ia64_mca_modify_comm(previous_current);
- goto no_mod;
- }
-
- if (r13 != sos->prev_IA64_KR_CURRENT) {
- msg = "inconsistent previous current and r13";
- goto no_mod;
- }
-
- if (!mca_recover_range(ms->pmsa_iip)) {
- if ((r12 - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent r12 and r13";
- goto no_mod;
- }
- if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bspstore and r13";
- goto no_mod;
- }
- va.p = old_bspstore;
- if (va.f.reg < 5) {
- msg = "old_bspstore is in the wrong region";
- goto no_mod;
- }
- if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bsp and r13";
- goto no_mod;
- }
- size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
- if (ar_bspstore + size > r12) {
- msg = "no room for blocked state";
- goto no_mod;
- }
- }
-
- ia64_mca_modify_comm(previous_current);
-
- /* Make the original task look blocked. First stack a struct pt_regs,
- * describing the state at the time of interrupt. mca_asm.S built a
- * partial pt_regs, copy it and fill in the blanks using minstate.
- */
- p = (char *)r12 - sizeof(*regs);
- old_regs = (struct pt_regs *)p;
- memcpy(old_regs, regs, sizeof(*regs));
- old_regs->loadrs = loadrs;
- old_unat = old_regs->ar_unat;
- finish_pt_regs(old_regs, sos, &old_unat);
-
- /* Next stack a struct switch_stack. mca_asm.S built a partial
- * switch_stack, copy it and fill in the blanks using pt_regs and
- * minstate.
- *
- * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
- * ar.pfs is set to 0.
- *
- * unwind.c::unw_unwind() does special processing for interrupt frames.
- * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
- * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not
- * that this is documented, of course. Set PRED_NON_SYSCALL in the
- * switch_stack on the original stack so it will unwind correctly when
- * unwind.c reads pt_regs.
- *
- * thread.ksp is updated to point to the synthesized switch_stack.
- */
- p -= sizeof(struct switch_stack);
- old_sw = (struct switch_stack *)p;
- memcpy(old_sw, sw, sizeof(*sw));
- old_sw->caller_unat = old_unat;
- old_sw->ar_fpsr = old_regs->ar_fpsr;
- copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
- copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
- copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
- copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
- old_sw->b0 = (u64)ia64_leave_kernel;
- old_sw->b1 = ms->pmsa_br1;
- old_sw->ar_pfs = 0;
- old_sw->ar_unat = old_unat;
- old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
- previous_current->thread.ksp = (u64)p - 16;
-
- /* Finally copy the original stack's registers back to its RBS.
- * Registers from ar.bspstore through ar.bsp at the time of the event
- * are in the current RBS, copy them back to the original stack. The
- * copy must be done register by register because the original bspstore
- * and the current one have different alignments, so the saved RNAT
- * data occurs at different places.
- *
- * mca_asm does cover, so the old_bsp already includes all registers at
- * the time of MCA/INIT. It also does flushrs, so all registers before
- * this function have been written to backing store on the MCA/INIT
- * stack.
- */
- new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
- old_rnat = regs->ar_rnat;
- while (slots--) {
- if (ia64_rse_is_rnat_slot(new_bspstore)) {
- new_rnat = ia64_get_rnat(new_bspstore++);
- }
- if (ia64_rse_is_rnat_slot(old_bspstore)) {
- *old_bspstore++ = old_rnat;
- old_rnat = 0;
- }
- nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
- old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
- old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
- *old_bspstore++ = *new_bspstore++;
- }
- old_sw->ar_bspstore = (unsigned long)old_bspstore;
- old_sw->ar_rnat = old_rnat;
-
- sos->prev_task = previous_current;
- return previous_current;
-
-no_mod:
- mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
- smp_processor_id(), type, msg);
- old_unat = regs->ar_unat;
- finish_pt_regs(regs, sos, &old_unat);
- return previous_current;
-}
-
-/* The monarch/slave interaction is based on monarch_cpu and requires that all
- * slaves have entered rendezvous before the monarch leaves. If any cpu has
- * not entered rendezvous yet then wait a bit. The assumption is that any
- * slave that has not rendezvoused after a reasonable time is never going to do
- * so. In this context, slave includes cpus that respond to the MCA rendezvous
- * interrupt, as well as cpus that receive the INIT slave event.
- */
-
-static void
-ia64_wait_for_slaves(int monarch, const char *type)
-{
-	int c, i, wait;
-
- /*
- * wait 5 seconds total for slaves (arbitrary)
- */
- for (i = 0; i < 5000; i++) {
- wait = 0;
- for_each_online_cpu(c) {
- if (c == monarch)
- continue;
- if (ia64_mc_info.imi_rendez_checkin[c]
- == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
- udelay(1000); /* short wait */
- wait = 1;
- break;
- }
- }
- if (!wait)
- goto all_in;
- }
-
- /*
- * Maybe slave(s) dead. Print buffered messages immediately.
- */
- ia64_mlogbuf_finish(0);
- mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
- for_each_online_cpu(c) {
- if (c == monarch)
- continue;
- if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
- mprintk(" %d", c);
- }
- mprintk("\n");
- return;
-
-all_in:
- mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
- return;
-}
-
-/* mca_insert_tr
- *
- * Switch the region id (rid) if needed while reloading a TR entry.
- * iord: 1: itr, 2: dtr
- *
-*/
-static void mca_insert_tr(u64 iord)
-{
-
- int i;
- u64 old_rr;
- struct ia64_tr_entry *p;
- unsigned long psr;
- int cpu = smp_processor_id();
-
- if (!ia64_idtrs[cpu])
- return;
-
- psr = ia64_clear_ic();
- for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
- p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
- if (p->pte & 0x1) {
- old_rr = ia64_get_rr(p->ifa);
- if (old_rr != p->rr) {
- ia64_set_rr(p->ifa, p->rr);
- ia64_srlz_d();
- }
- ia64_ptr(iord, p->ifa, p->itir >> 2);
- ia64_srlz_i();
- if (iord & 0x1) {
- ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
- ia64_srlz_i();
- }
- if (iord & 0x2) {
- ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
- ia64_srlz_i();
- }
- if (old_rr != p->rr) {
- ia64_set_rr(p->ifa, old_rr);
- ia64_srlz_d();
- }
- }
- }
- ia64_set_psr(psr);
-}
-
-/*
- * ia64_mca_handler
- *
- * This is uncorrectable machine check handler called from OS_MCA
- * dispatch code which is in turn called from SAL_CHECK().
- * This is the place where the core of OS MCA handling is done.
- * Right now the logs are extracted and displayed in a well-defined
- * format. This handler code is supposed to be run only on the
- * monarch processor. Once the monarch is done with MCA handling,
- * further MCA logging is enabled by clearing the logs.
- * Monarch also has the duty of sending wakeup-IPIs to pull the
- * slave processors out of rendezvous spinloop.
- *
- * If multiple processors call into OS_MCA, the first will become
- * the monarch. Subsequent cpus will be recorded in the mca_cpu
- * bitmask. After the first monarch has processed its MCA, it
- * will wake up the next cpu in the mca_cpu bitmask and then go
- * into the rendezvous loop. When all processors have serviced
- * their MCA, the last monarch frees up the rest of the processors.
- */
-void
-ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
- struct ia64_sal_os_state *sos)
-{
- int recover, cpu = smp_processor_id();
- struct task_struct *previous_current;
- struct ia64_mca_notify_die nd =
- { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
- static atomic_t mca_count;
- static cpumask_t mca_cpu;
-
- if (atomic_add_return(1, &mca_count) == 1) {
- monarch_cpu = cpu;
- sos->monarch = 1;
- } else {
- cpumask_set_cpu(cpu, &mca_cpu);
- sos->monarch = 0;
- }
- mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
- "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
-
- previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
-
- NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
-
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
- if (sos->monarch) {
- ia64_wait_for_slaves(cpu, "MCA");
-
- /* Wakeup all the processors which are spinning in the
- * rendezvous loop. They will leave SAL, then spin in the OS
- * with interrupts disabled until this monarch cpu leaves the
- * MCA handler. That gets control back to the OS so we can
- * backtrace the other cpus, backtrace when spinning in SAL
- * does not work.
- */
- ia64_mca_wakeup_all();
- } else {
- while (cpumask_test_cpu(cpu, &mca_cpu))
- cpu_relax(); /* spin until monarch wakes us */
- }
-
- NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
-
- /* Get the MCA error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
-
- /* MCA error recovery */
- recover = (ia64_mca_ucmc_extension
- && ia64_mca_ucmc_extension(
- IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
- sos));
-
- if (recover) {
- sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
- rh->severity = sal_log_severity_corrected;
- ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
- sos->os_status = IA64_MCA_CORRECTED;
- } else {
- /* Dump buffered message to console */
- ia64_mlogbuf_finish(1);
- }
-
- if (__this_cpu_read(ia64_mca_tr_reload)) {
-		mca_insert_tr(0x1); /* Reload dynamic itrs */
-		mca_insert_tr(0x2); /* Reload dynamic dtrs */
- }
-
- NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
-
- if (atomic_dec_return(&mca_count) > 0) {
- int i;
-
- /* wake up the next monarch cpu,
- * and put this cpu in the rendez loop.
- */
- for_each_online_cpu(i) {
- if (cpumask_test_cpu(i, &mca_cpu)) {
- monarch_cpu = i;
- cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */
- while (monarch_cpu != -1)
- cpu_relax(); /* spin until last cpu leaves */
- ia64_set_curr_task(cpu, previous_current);
- ia64_mc_info.imi_rendez_checkin[cpu]
- = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
- return;
- }
- }
- }
- ia64_set_curr_task(cpu, previous_current);
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
- monarch_cpu = -1; /* This frees the slaves and previous monarchs */
-}
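-
-/* Worked scenario (editorial sketch of the handoff above): cpus A, B and C
- * all take an MCA.  A wins the atomic_add_return() race and becomes monarch;
- * B and C set their bits in mca_cpu and spin.  When A finishes it clears
- * B's bit, making B the next monarch, and then spins on monarch_cpu itself.
- * B eventually wakes C the same way; C, the last monarch, sets
- * monarch_cpu = -1, which releases A, B and all the rendezvous slaves.
- */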
-
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
-
-/*
- * ia64_mca_cmc_int_handler
- *
- * This is the corrected machine check interrupt handler.
- * Right now the logs are extracted and displayed in a well-defined
- * format.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- *
- * Outputs
- * None
- */
-static irqreturn_t
-ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
-{
- static unsigned long cmc_history[CMC_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cmc_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __func__, cmc_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
- spin_lock(&cmc_history_lock);
- if (!cmc_polling_enabled) {
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
- if (now - cmc_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
- if (count >= CMC_HISTORY_LENGTH) {
-
- cmc_polling_enabled = 1;
- spin_unlock(&cmc_history_lock);
- /* If we're being hit with CMC interrupts, we won't
- * ever execute the schedule_work() below. Need to
- * disable CMC interrupts on this processor now.
- */
- ia64_mca_cmc_vector_disable(NULL);
- schedule_work(&cmc_disable_work);
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
-
- /* lock already released, get out now */
- goto out;
- } else {
- cmc_history[index++] = now;
- if (index == CMC_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cmc_history_lock);
-out:
- /* Get the CMC error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-
- local_irq_disable();
-
- return IRQ_HANDLED;
-}
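-
-/* Worked example (editorial sketch): with CMC_HISTORY_LENGTH defined as 8
- * earlier in this file, the 8th CMC to land within any one-second (HZ
- * jiffies) window, the current interrupt plus seven recent timestamps,
- * trips the switch to polling mode; the poll pass in
- * ia64_mca_cmc_int_caller() re-enables the interrupt once a full pass
- * finds no new records.
- */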
-
-/*
- * ia64_mca_cmc_int_caller
- *
- * Triggered by sw interrupt from CMC polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * Outputs
- * handled
- */
-static irqreturn_t
-ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
-{
- static int start_count = -1;
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
-
- ia64_mca_cmc_int_handler(cmc_irq, arg);
-
-	cpuid = cpumask_next(cpuid, cpu_online_mask);	/* next online cpu after us */
-
- if (cpuid < nr_cpu_ids) {
- ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /* If no log record, switch out of polling mode */
- if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
-
- printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
- schedule_work(&cmc_enable_work);
- cmc_polling_enabled = 0;
-
- } else {
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
- }
-
- start_count = -1;
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_mca_cmc_poll
- *
- * Poll for Corrected Machine Checks (CMCs)
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-ia64_mca_cmc_poll (struct timer_list *unused)
-{
- /* Trigger a CMC interrupt cascade */
- ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
- IA64_IPI_DM_INT, 0);
-}
-
-/*
- * ia64_mca_cpe_int_caller
- *
- * Triggered by sw interrupt from CPE polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * Outputs
- * handled
- */
-static irqreturn_t
-ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
-{
- static int start_count = -1;
- static int poll_time = MIN_CPE_POLL_INTERVAL;
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
-
- ia64_mca_cpe_int_handler(cpe_irq, arg);
-
-	cpuid = cpumask_next(cpuid, cpu_online_mask);	/* next online cpu after us */
-
-	if (cpuid < nr_cpu_ids) {
- ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /*
- * If a log was recorded, increase our polling frequency,
- * otherwise, backoff or return to interrupt mode.
- */
- if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
- poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
- } else if (cpe_vector < 0) {
- poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
- } else {
- poll_time = MIN_CPE_POLL_INTERVAL;
-
- printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
- enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
- cpe_poll_enabled = 0;
- }
-
- if (cpe_poll_enabled)
- mod_timer(&cpe_poll_timer, jiffies + poll_time);
- start_count = -1;
- }
-
- return IRQ_HANDLED;
-}
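-
-/* Worked example (editorial sketch): with MIN_CPE_POLL_INTERVAL at 2
- * minutes and MAX_CPE_POLL_INTERVAL at 15 minutes, as defined earlier in
- * this file, a quiet system without a CPEI vector backs off 2, 4, 8, then
- * 15 minutes (16 capped at the maximum) between passes, while any pass
- * that finds a new record halves the interval again, never dropping below
- * 2 minutes.
- */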
-
-/*
- * ia64_mca_cpe_poll
- *
- * Poll for Corrected Platform Errors (CPEs): trigger an interrupt on
- * the first cpu; from there it will trickle through all the cpus.
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-ia64_mca_cpe_poll (struct timer_list *unused)
-{
- /* Trigger a CPE interrupt cascade */
- ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
- IA64_IPI_DM_INT, 0);
-}
-
-static int
-default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
-{
- int c;
- struct task_struct *g, *t;
- if (val != DIE_INIT_MONARCH_PROCESS)
- return NOTIFY_DONE;
-#ifdef CONFIG_KEXEC
- if (atomic_read(&kdump_in_progress))
- return NOTIFY_DONE;
-#endif
-
- /*
- * FIXME: mlogbuf will brim over with INIT stack dumps.
-	 * To enable show_stack from INIT, we use oops_in_progress, which is
-	 * meant for real oopses. This may cause problems after INIT.
- */
- BREAK_LOGLEVEL(console_loglevel);
- ia64_mlogbuf_dump_from_init();
-
- printk(KERN_ERR "Processes interrupted by INIT -");
- for_each_online_cpu(c) {
- struct ia64_sal_os_state *s;
- t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
- s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
- g = s->prev_task;
- if (g) {
- if (g->pid)
- printk(" %d", g->pid);
- else
- printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
- }
- }
- printk("\n\n");
- if (read_trylock(&tasklist_lock)) {
- for_each_process_thread(g, t) {
- printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL, KERN_DEFAULT);
- }
- read_unlock(&tasklist_lock);
- }
- /* FIXME: This will not restore zapped printk locks. */
- RESTORE_LOGLEVEL(console_loglevel);
- return NOTIFY_DONE;
-}
-
-/*
- * C portion of the OS INIT handler
- *
- * Called from ia64_os_init_dispatch
- *
- * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for
- * this event. This code is used for both monarch and slave INIT events, see
- * sos->monarch.
- *
- * All INIT events switch to the INIT stack and change the previous process to
- * blocked status. If one of the INIT events is the monarch then we are
- * probably processing the nmi button/command. Use the monarch cpu to dump all
- * the processes. The slave INIT events all spin until the monarch cpu
- * returns. We can also get INIT slave events for MCA, in which case the MCA
- * process is the monarch.
- */
-
-void
-ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
- struct ia64_sal_os_state *sos)
-{
- static atomic_t slaves;
- static atomic_t monarchs;
- struct task_struct *previous_current;
- int cpu = smp_processor_id();
- struct ia64_mca_notify_die nd =
- { .sos = sos, .monarch_cpu = &monarch_cpu };
-
- NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
-
- mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
- sos->proc_state_param, cpu, sos->monarch);
- salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
-
- previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
- sos->os_status = IA64_INIT_RESUME;
-
- /* FIXME: Workaround for broken proms that drive all INIT events as
- * slaves. The last slave that enters is promoted to be a monarch.
-	 * Remove this code in September 2006, which gives platforms a year to
- * fix their proms and get their customers updated.
- */
- if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
- mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
- __func__, cpu);
- atomic_dec(&slaves);
- sos->monarch = 1;
- }
-
- /* FIXME: Workaround for broken proms that drive all INIT events as
- * monarchs. Second and subsequent monarchs are demoted to slaves.
-	 * Remove this code in September 2006, which gives platforms a year to
- * fix their proms and get their customers updated.
- */
- if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
- mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
- __func__, cpu);
- atomic_dec(&monarchs);
- sos->monarch = 0;
- }
-
- if (!sos->monarch) {
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
-
-#ifdef CONFIG_KEXEC
- while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
- udelay(1000);
-#else
- while (monarch_cpu == -1)
- cpu_relax(); /* spin until monarch enters */
-#endif
-
- NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
- NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
-
-#ifdef CONFIG_KEXEC
- while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
- udelay(1000);
-#else
- while (monarch_cpu != -1)
- cpu_relax(); /* spin until monarch leaves */
-#endif
-
- NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
-
- mprintk("Slave on cpu %d returning to normal service.\n", cpu);
- ia64_set_curr_task(cpu, previous_current);
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
- atomic_dec(&slaves);
- return;
- }
-
- monarch_cpu = cpu;
- NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
-
- /*
-	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
- * generated via the BMC's command-line interface, but since the console is on the
- * same serial line, the user will need some time to switch out of the BMC before
- * the dump begins.
- */
- mprintk("Delaying for 5 seconds...\n");
- udelay(5*1000000);
- ia64_wait_for_slaves(cpu, "INIT");
- /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
- * to default_monarch_init_process() above and just print all the
- * tasks.
- */
- NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
- NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
-
- mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
- atomic_dec(&monarchs);
- ia64_set_curr_task(cpu, previous_current);
- monarch_cpu = -1;
- return;
-}
-
-static int __init
-ia64_mca_disable_cpe_polling(char *str)
-{
- cpe_poll_enabled = 0;
- return 1;
-}
-
-__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
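-
-/* Usage note (editorial): booting with "disable_cpe_poll" on the kernel
- * command line clears cpe_poll_enabled, so ia64_mca_late_init() never
- * starts the CPE poll timer and platform errors are reported only through
- * an interrupt-driven CPEI, if the platform provides one.
- */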
-
-/* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
- * these stacks can never sleep, cannot return from the kernel to user
- * space, and do not appear in a normal ps listing. So there is no need to
- * format most of the fields.
- */
-
-static void
-format_mca_init_stack(void *mca_data, unsigned long offset,
- const char *type, int cpu)
-{
- struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
- struct thread_info *ti;
- memset(p, 0, KERNEL_STACK_SIZE);
- ti = task_thread_info(p);
- ti->flags = _TIF_MCA_INIT;
- ti->preempt_count = 1;
- ti->task = p;
- ti->cpu = cpu;
- p->stack = ti;
- p->__state = TASK_UNINTERRUPTIBLE;
- cpumask_set_cpu(cpu, &p->cpus_mask);
- INIT_LIST_HEAD(&p->tasks);
- p->parent = p->real_parent = p->group_leader = p;
- INIT_LIST_HEAD(&p->children);
- INIT_LIST_HEAD(&p->sibling);
- strscpy(p->comm, type, sizeof(p->comm)-1);
-}
-
-/* Caller prevents this from being called after init */
-static void * __ref mca_bootmem(void)
-{
- return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
-}
-
-/* Do per-CPU MCA-related initialization. */
-void
-ia64_mca_cpu_init(void *cpu_data)
-{
- void *pal_vaddr;
- void *data;
- long sz = sizeof(struct ia64_mca_cpu);
- int cpu = smp_processor_id();
- static int first_time = 1;
-
- /*
-	 * Structure will already be allocated if the cpu has been online,
- * then offlined.
- */
- if (__per_cpu_mca[cpu]) {
- data = __va(__per_cpu_mca[cpu]);
- } else {
- if (first_time) {
- data = mca_bootmem();
- first_time = 0;
- } else
- data = (void *)__get_free_pages(GFP_ATOMIC,
- get_order(sz));
- if (!data)
- panic("Could not allocate MCA memory for cpu %d\n",
- cpu);
- }
- format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
- "MCA", cpu);
- format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
- "INIT", cpu);
- __this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));
-
- /*
- * Stash away a copy of the PTE needed to map the per-CPU page.
- * We may need it during MCA recovery.
- */
- __this_cpu_write(ia64_mca_per_cpu_pte,
- pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)));
-
- /*
- * Also, stash away a copy of the PAL address and the PTE
- * needed to map it.
- */
- pal_vaddr = efi_get_pal_addr();
- if (!pal_vaddr)
- return;
- __this_cpu_write(ia64_mca_pal_base,
- GRANULEROUNDDOWN((unsigned long) pal_vaddr));
- __this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr),
- PAGE_KERNEL)));
-}
-
-static int ia64_mca_cpu_online(unsigned int cpu)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- if (!cmc_polling_enabled)
- ia64_mca_cmc_vector_enable(NULL);
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * ia64_mca_init
- *
- * Do all the system level mca specific initialization.
- *
- * 1. Register spinloop and wakeup request interrupt vectors
- *
- * 2. Register OS_MCA handler entry point
- *
- * 3. Register OS_INIT handler entry point
- *
- * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
- *
- * Note that this initialization is done very early before some kernel
- * services are available.
- *
- * Inputs : None
- *
- * Outputs : None
- */
-void __init
-ia64_mca_init(void)
-{
- ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
- ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
- ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
- int i;
- long rc;
- struct ia64_sal_retval isrv;
- unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
- static struct notifier_block default_init_monarch_nb = {
- .notifier_call = default_monarch_init_process,
-		.priority = 0 /* we need to be notified last */
- };
-
- IA64_MCA_DEBUG("%s: begin\n", __func__);
-
- /* Clear the Rendez checkin flag for all cpus */
-	for (i = 0; i < NR_CPUS; i++)
- ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
- /*
- * Register the rendezvous spinloop and wakeup mechanism with SAL
- */
-
- /* Register the rendezvous interrupt vector with SAL */
- while (1) {
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
- SAL_MC_PARAM_MECHANISM_INT,
- IA64_MCA_RENDEZ_VECTOR,
- timeout,
- SAL_MC_PARAM_RZ_ALWAYS);
- rc = isrv.status;
- if (rc == 0)
- break;
- if (rc == -2) {
- printk(KERN_INFO "Increasing MCA rendezvous timeout from "
- "%ld to %ld milliseconds\n", timeout, isrv.v0);
- timeout = isrv.v0;
- NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
- continue;
- }
- printk(KERN_ERR "Failed to register rendezvous interrupt "
- "with SAL (status %ld)\n", rc);
- return;
- }
-
- /* Register the wakeup interrupt vector with SAL */
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
- SAL_MC_PARAM_MECHANISM_INT,
- IA64_MCA_WAKEUP_VECTOR,
- 0, 0);
- rc = isrv.status;
- if (rc) {
- printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
- "(status %ld)\n", rc);
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
-
- ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
- /*
- * XXX - disable SAL checksum by setting size to 0; should be
- * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
- */
- ia64_mc_info.imi_mca_handler_size = 0;
-
- /* Register the os mca handler with SAL */
- if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
- ia64_mc_info.imi_mca_handler,
- ia64_tpa(mca_hldlr_ptr->gp),
- ia64_mc_info.imi_mca_handler_size,
- 0, 0, 0)))
- {
- printk(KERN_ERR "Failed to register OS MCA handler with SAL "
- "(status %ld)\n", rc);
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
- ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
-
- /*
- * XXX - disable SAL checksum by setting size to 0, should be
- * size of the actual init handler in mca_asm.S.
- */
- ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
- ia64_mc_info.imi_monarch_init_handler_size = 0;
- ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
- ia64_mc_info.imi_slave_init_handler_size = 0;
-
- IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
- ia64_mc_info.imi_monarch_init_handler);
-
- /* Register the os init handler with SAL */
- if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
- ia64_mc_info.imi_monarch_init_handler,
- ia64_tpa(ia64_getreg(_IA64_REG_GP)),
- ia64_mc_info.imi_monarch_init_handler_size,
- ia64_mc_info.imi_slave_init_handler,
- ia64_tpa(ia64_getreg(_IA64_REG_GP)),
- ia64_mc_info.imi_slave_init_handler_size)))
- {
- printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
- "(status %ld)\n", rc);
- return;
- }
- if (register_die_notifier(&default_init_monarch_nb)) {
- printk(KERN_ERR "Failed to register default monarch INIT process\n");
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
-
- /* Initialize the areas set aside by the OS to buffer the
- * platform/processor error states for MCA/INIT/CMC
- * handling.
- */
- ia64_log_init(SAL_INFO_TYPE_MCA);
- ia64_log_init(SAL_INFO_TYPE_INIT);
- ia64_log_init(SAL_INFO_TYPE_CMC);
- ia64_log_init(SAL_INFO_TYPE_CPE);
-
- mca_init = 1;
- printk(KERN_INFO "MCA related initialization done\n");
-}
-
-
-/*
- * These pieces cannot be done in ia64_mca_init() because it is called before
- * early_irq_init() which would wipe out our percpu irq registrations. But we
- * cannot leave them until ia64_mca_late_init() because by then all the other
- * processors have been brought online and have set their own CMC vectors to
- * point at a non-existent action. Called from arch_early_irq_init().
- */
-void __init ia64_mca_irq_init(void)
-{
- /*
- * Configure the CMCI/P vector and handler. Interrupts for CMC are
-	 * per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
- */
- register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
- "cmc_hndlr");
- register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
- "cmc_poll");
- ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
-
- /* Setup the MCA rendezvous interrupt vector */
- register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
- 0, "mca_rdzv");
-
- /* Setup the MCA wakeup interrupt vector */
- register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
- 0, "mca_wkup");
-
- /* Setup the CPEI/P handler */
- register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
- "cpe_poll");
-}
-
-/*
- * ia64_mca_late_init
- *
- * Opportunity to set up things that require initialization later
- * than ia64_mca_init. Set up a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
- */
-static int __init
-ia64_mca_late_init(void)
-{
- if (!mca_init)
- return 0;
-
-	/* Set up the CMC poll timer */
- timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0);
-
- /* Unmask/enable the vector */
- cmc_polling_enabled = 0;
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
- ia64_mca_cpu_online, NULL);
- IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
-
- /* Setup the CPEI/P vector and handler */
- cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
- timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);
-
- {
- unsigned int irq;
-
- if (cpe_vector >= 0) {
- /* If platform supports CPEI, enable the irq. */
- irq = local_vector_to_irq(cpe_vector);
- if (irq > 0) {
- cpe_poll_enabled = 0;
- irq_set_status_flags(irq, IRQ_PER_CPU);
- if (request_irq(irq, ia64_mca_cpe_int_handler,
- 0, "cpe_hndlr", NULL))
- pr_err("Failed to register cpe_hndlr interrupt\n");
- ia64_cpe_irq = irq;
- ia64_mca_register_cpev(cpe_vector);
- IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
- __func__);
- return 0;
- }
- printk(KERN_ERR "%s: Failed to find irq for CPE "
- "interrupt handler, vector %d\n",
- __func__, cpe_vector);
- }
- /* If platform doesn't support CPEI, get the timer going. */
- if (cpe_poll_enabled) {
- ia64_mca_cpe_poll(0UL);
- IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
- }
- }
-
- return 0;
-}
-
-device_initcall(ia64_mca_late_init);
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
deleted file mode 100644
index 0d6b8cf9d1..0000000000
--- a/arch/ia64/kernel/mca_asm.S
+++ /dev/null
@@ -1,1123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * File: mca_asm.S
- * Purpose: assembly portion of the IA64 MCA handling
- *
- * Mods by cfleck to integrate into kernel build
- *
- * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
- * Added various stop bits to get a clean compile
- *
- * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
- * Added code to save INIT handoff state in pt_regs format,
- * switch to temp kstack, switch modes, jump to C INIT handler
- *
- * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
- * Before entering virtual mode code:
- * 1. Check for TLB CPU error
- * 2. Restore current thread pointer to kr6
- * 3. Move stack ptr 16 bytes to conform to C calling convention
- *
- * 2004-11-12 Russ Anderson <rja@sgi.com>
- * Added per cpu MCA/INIT stack save areas.
- *
- * 2005-12-08 Keith Owens <kaos@sgi.com>
- * Use per cpu MCA/INIT stacks for all data.
- */
-#include <linux/threads.h>
-#include <linux/pgtable.h>
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/mca_asm.h>
-#include <asm/mca.h>
-
-#include "entry.h"
-
-#define GET_IA64_MCA_DATA(reg) \
- GET_THIS_PADDR(reg, ia64_mca_data) \
- ;; \
- ld8 reg=[reg]
-
- .global ia64_do_tlb_purge
- .global ia64_os_mca_dispatch
- .global ia64_os_init_on_kdump
- .global ia64_os_init_dispatch_monarch
- .global ia64_os_init_dispatch_slave
-
- .text
- .align 16
-
-//StartMain////////////////////////////////////////////////////////////////////
-
-/*
- * Just the TLB purge part is moved to a separate function
- * so we can re-use the code for cpu hotplug as well.
- * The caller should set up b1, so we can branch once the
- * tlb flush is complete.
- */
-
-ia64_do_tlb_purge:
-#define O(member) IA64_CPUINFO_##member##_OFFSET
-
- GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
- ;;
- addl r17=O(PTCE_STRIDE),r2
- addl r2=O(PTCE_BASE),r2
- ;;
- ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
- ld4 r19=[r2],4 // r19=ptce_count[0]
- ld4 r21=[r17],4 // r21=ptce_stride[0]
- ;;
- ld4 r20=[r2] // r20=ptce_count[1]
- ld4 r22=[r17] // r22=ptce_stride[1]
- mov r24=0
- ;;
- adds r20=-1,r20
- ;;
-#undef O
-
-2:
- cmp.ltu p6,p7=r24,r19
-(p7) br.cond.dpnt.few 4f
- mov ar.lc=r20
-3:
- ptc.e r18
- ;;
- add r18=r22,r18
- br.cloop.sptk.few 3b
- ;;
- add r18=r21,r18
- add r24=1,r24
- ;;
- br.sptk.few 2b
-4:
- srlz.i // srlz.i implies srlz.d
- ;;
-
- // Now purge addresses formerly mapped by TR registers
- // 1. Purge ITR&DTR for kernel.
- movl r16=KERNEL_START
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- ;;
- ptr.i r16, r18
- ptr.d r16, r18
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
- // 3. Purge ITR for PAL code.
- GET_THIS_PADDR(r2, ia64_mca_pal_base)
- ;;
- ld8 r16=[r2]
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i r16,r18
- ;;
- srlz.i
- ;;
- // 4. Purge DTR for stack.
- mov r16=IA64_KR(CURRENT_STACK)
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- movl r19=PAGE_OFFSET
- ;;
- add r16=r19,r16
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.i
- ;;
- // Now branch away to caller.
- br.sptk.many b1
- ;;
-
-//EndMain//////////////////////////////////////////////////////////////////////
-
-//StartMain////////////////////////////////////////////////////////////////////
-
-ia64_os_mca_dispatch:
- mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- mov r19=1 // All MCA events are treated as monarch (for now)
- br.sptk ia64_state_save // save the state that is not in minstate
-1:
-
- GET_IA64_MCA_DATA(r2)
- // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
- ;;
- add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
- ;;
- ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK.
- ;;
- tbit.nz p6,p7=r18,60
-(p7) br.spnt done_tlb_purge_and_reload
-
- // The following code purges TC and TR entries. Then reload all TC entries.
- // Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
- movl r18=ia64_reload_tr;;
- LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
- mov b1=r18;;
- br.sptk.many ia64_do_tlb_purge;;
-
-ia64_reload_tr:
- // Finally reload the TR registers.
- // 1. Reload DTR/ITR registers for kernel.
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- mov r19=ip
- movl r18=PAGE_KERNEL
- ;;
- dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
- ;;
- or r18=r17,r18
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
- srlz.d
- ;;
- // 3. Reload ITR for PAL code.
- GET_THIS_PADDR(r2, ia64_mca_pal_pte)
- ;;
- ld8 r18=[r2] // load PAL PTE
- ;;
- GET_THIS_PADDR(r2, ia64_mca_pal_base)
- ;;
- ld8 r16=[r2] // load PAL vaddr
- mov r19=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r19
- mov cr.ifa=r16
- mov r20=IA64_TR_PALCODE
- ;;
- itr.i itr[r20]=r18
- ;;
- srlz.i
- ;;
- // 4. Reload DTR for stack.
- mov r16=IA64_KR(CURRENT_STACK)
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- movl r19=PAGE_OFFSET
- ;;
- add r18=r19,r16
- movl r20=PAGE_KERNEL
- ;;
- add r16=r20,r16
- mov r19=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r19
- mov cr.ifa=r18
- mov r20=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r20]=r16
- GET_THIS_PADDR(r2, ia64_mca_tr_reload)
- mov r18 = 1
- ;;
- srlz.d
- ;;
- st8 [r2] =r18
- ;;
-
-done_tlb_purge_and_reload:
-
- // switch to per cpu MCA stack
- mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_new_stack
-1:
-
- // everything saved, now we can set the kernel registers
- mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_set_kernel_registers
-1:
-
- // This must be done in physical mode
- GET_IA64_MCA_DATA(r2)
- ;;
- mov r7=r2
-
- // Enter virtual mode from physical mode
- VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
-
- // This code returns to SAL via SOS r2, in general SAL has no unwind
- // data. To get a clean termination when backtracing the C MCA/INIT
- // handler, set a dummy return address of 0 in this routine. That
- // requires that ia64_os_mca_virtual_begin be a global function.
-ENTRY(ia64_os_mca_virtual_begin)
- .prologue
- .save rp,r0
- .body
-
- mov ar.rsc=3 // set eager mode for C handler
- mov r2=r7 // see GET_IA64_MCA_DATA above
- ;;
-
- // Call virtual mode handler
- alloc r14=ar.pfs,0,0,3,0
- ;;
- DATA_PA_TO_VA(r2,r7)
- ;;
- add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
- add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
- add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
- br.call.sptk.many b0=ia64_mca_handler
-
- // Revert back to physical mode before going back to SAL
- PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
-ia64_os_mca_virtual_end:
-
-END(ia64_os_mca_virtual_begin)
-
- // switch back to previous stack
- alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame
- mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_old_stack
-1:
-
- mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_state_restore // restore the SAL state
-1:
-
- mov b0=r12 // SAL_CHECK return address
-
- br b0
-
-//EndMain//////////////////////////////////////////////////////////////////////
-
-//StartMain////////////////////////////////////////////////////////////////////
-
-//
-// NOP INIT handler for kdump. In a panic situation we may receive an INIT
-// while the kernel is in transition. Since we reinitialize registers on
-// leaving the current kernel, the monarch/slave handlers of the current
-// kernel can no longer be called safely in virtual mode.
-// We could unregister these INIT handlers from SAL, but then an INIT would
-// result in a warm boot by SAL and we could not retrieve the crashdump.
-// Therefore register this NOP function with SAL, to prevent entering virtual
-// mode and the resulting warm boot by SAL.
-//
-ia64_os_init_on_kdump:
- mov r8=r0 // IA64_INIT_RESUME
- mov r9=r10 // SAL_GP
- mov r22=r17 // *minstate
- ;;
- mov r10=r0 // return to same context
- mov b0=r12 // SAL_CHECK return address
- br b0
-
-//
-// SAL to OS entry point for INIT on all processors. This has been defined for
-// registration purposes with SAL as a part of ia64_mca_init. Monarch and
-// slave INIT have identical processing, except for the value of the
-// sos->monarch flag in r19.
-//
-
-ia64_os_init_dispatch_monarch:
- mov r19=1 // Bow, bow, ye lower middle classes!
- br.sptk ia64_os_init_dispatch
-
-ia64_os_init_dispatch_slave:
- mov r19=0 // <igor>yeth, mathter</igor>
-
-ia64_os_init_dispatch:
-
- mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_state_save // save the state that is not in minstate
-1:
-
- // switch to per cpu INIT stack
- mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_new_stack
-1:
-
- // everything saved, now we can set the kernel registers
- mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_set_kernel_registers
-1:
-
- // This must be done in physical mode
- GET_IA64_MCA_DATA(r2)
- ;;
- mov r7=r2
-
- // Enter virtual mode from physical mode
- VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
-
- // This code returns to SAL via SOS r2, in general SAL has no unwind
- // data. To get a clean termination when backtracing the C MCA/INIT
- // handler, set a dummy return address of 0 in this routine. That
- // requires that ia64_os_init_virtual_begin be a global function.
-ENTRY(ia64_os_init_virtual_begin)
- .prologue
- .save rp,r0
- .body
-
- mov ar.rsc=3 // set eager mode for C handler
- mov r2=r7 // see GET_IA64_MCA_DATA above
- ;;
-
- // Call virtual mode handler
- alloc r14=ar.pfs,0,0,3,0
- ;;
- DATA_PA_TO_VA(r2,r7)
- ;;
- add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
- add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
- add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
- br.call.sptk.many b0=ia64_init_handler
-
- // Revert back to physical mode before going back to SAL
- PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
-ia64_os_init_virtual_end:
-
-END(ia64_os_init_virtual_begin)
-
- mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_state_restore // restore the SAL state
-1:
-
- // switch back to previous stack
- alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame
- mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
- LOAD_PHYSICAL(p0,r2,1f) // return address
- br.sptk ia64_old_stack
-1:
-
- mov b0=r12 // SAL_CHECK return address
- br b0
-
-//EndMain//////////////////////////////////////////////////////////////////////
-
-// common defines for the stubs
-#define ms r4
-#define regs r5
-#define temp1 r2 /* careful, it overlaps with input registers */
-#define temp2 r3 /* careful, it overlaps with input registers */
-#define temp3 r7
-#define temp4 r14
-
-
-//++
-// Name:
-// ia64_state_save()
-//
-// Stub Description:
-//
-// Save the state that is not in minstate. This is sensitive to the layout of
-// struct ia64_sal_os_state in mca.h.
-//
-// r2 contains the return address, r3 contains either
-// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
-//
-// The OS to SAL section of struct ia64_sal_os_state is set to a default
-// value of cold boot (MCA) or warm boot (INIT) and return to the same
-// context. ia64_sal_os_state is also used to hold some registers that
-// need to be saved and restored across the stack switches.
-//
-// Most input registers to this stub come from PAL/SAL
-// r1 os gp, physical
-// r8 pal_proc entry point
-// r9 sal_proc entry point
-// r10 sal gp
-// r11 MCA - rendezvous state, INIT - reason code
-// r12 sal return address
-// r17 pal min_state
-// r18 processor state parameter
-// r19 monarch flag, set by the caller of this routine
-//
-// In addition to the SAL to OS state, this routine saves all the
-// registers that appear in struct pt_regs and struct switch_stack,
-// excluding those that are already in the PAL minstate area. This
-// results in a partial pt_regs and switch_stack, the C code copies the
-// remaining registers from PAL minstate to pt_regs and switch_stack. The
-// resulting structures contain all the state of the original process when
-// MCA/INIT occurred.
-//
-//--
-
-ia64_state_save:
- add regs=MCA_SOS_OFFSET, r3
- add ms=MCA_SOS_OFFSET+8, r3
- mov b0=r2 // save return address
- cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
- ;;
- GET_IA64_MCA_DATA(temp2)
- ;;
- add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack
- add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack
- ;;
- mov regs=temp1 // save the start of sos
- st8 [temp1]=r1,16 // os_gp
- st8 [temp2]=r8,16 // pal_proc
- ;;
- st8 [temp1]=r9,16 // sal_proc
- st8 [temp2]=r11,16 // rv_rc
- mov r11=cr.iipa
- ;;
- st8 [temp1]=r18 // proc_state_param
- st8 [temp2]=r19 // monarch
- mov r6=IA64_KR(CURRENT)
- add temp1=SOS(SAL_RA), regs
- add temp2=SOS(SAL_GP), regs
- ;;
- st8 [temp1]=r12,16 // sal_ra
- st8 [temp2]=r10,16 // sal_gp
- mov r12=cr.isr
- ;;
- st8 [temp1]=r17,16 // pal_min_state
- st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
- mov r6=IA64_KR(CURRENT_STACK)
- ;;
- st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK
- st8 [temp2]=r0,16 // prev_task, starts off as NULL
- mov r6=cr.ifa
- ;;
- st8 [temp1]=r12,16 // cr.isr
- st8 [temp2]=r6,16 // cr.ifa
- mov r12=cr.itir
- ;;
- st8 [temp1]=r12,16 // cr.itir
- st8 [temp2]=r11,16 // cr.iipa
- mov r12=cr.iim
- ;;
- st8 [temp1]=r12 // cr.iim
-(p1) mov r12=IA64_MCA_COLD_BOOT
-(p2) mov r12=IA64_INIT_WARM_BOOT
- mov r6=cr.iha
- add temp1=SOS(OS_STATUS), regs
- ;;
- st8 [temp2]=r6 // cr.iha
- add temp2=SOS(CONTEXT), regs
- st8 [temp1]=r12 // os_status, default is cold boot
- mov r6=IA64_MCA_SAME_CONTEXT
- ;;
- st8 [temp2]=r6 // context, default is same context
-
- // Save the pt_regs data that is not in minstate. The previous code
- // left regs at sos.
- add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
- ;;
- add temp1=PT(B6), regs
- mov temp3=b6
- mov temp4=b7
- add temp2=PT(B7), regs
- ;;
- st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6
- st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7
- mov temp3=ar.csd
- mov temp4=ar.ssd
- cover // must be last in group
- ;;
- st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd
- st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd
- mov temp3=ar.unat
- mov temp4=ar.pfs
- ;;
- st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat
- st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs
- mov temp3=ar.rnat
- mov temp4=ar.bspstore
- ;;
- st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat
- st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore
- mov temp3=ar.bsp
- ;;
- sub temp3=temp3, temp4 // ar.bsp - ar.bspstore
- mov temp4=ar.fpsr
- ;;
- shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs"
- ;;
- st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs
- st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr
- mov temp3=ar.ccv
- ;;
- st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv
- stf.spill [temp2]=f6,PT(F8)-PT(F6)
- ;;
- stf.spill [temp1]=f7,PT(F9)-PT(F7)
- stf.spill [temp2]=f8,PT(F10)-PT(F8)
- ;;
- stf.spill [temp1]=f9,PT(F11)-PT(F9)
- stf.spill [temp2]=f10
- ;;
- stf.spill [temp1]=f11
-
- // Save the switch_stack data that is not in minstate nor pt_regs. The
- // previous code left regs at pt_regs.
- add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
- ;;
- add temp1=SW(F2), regs
- add temp2=SW(F3), regs
- ;;
- stf.spill [temp1]=f2,32
- stf.spill [temp2]=f3,32
- ;;
- stf.spill [temp1]=f4,32
- stf.spill [temp2]=f5,32
- ;;
- stf.spill [temp1]=f12,32
- stf.spill [temp2]=f13,32
- ;;
- stf.spill [temp1]=f14,32
- stf.spill [temp2]=f15,32
- ;;
- stf.spill [temp1]=f16,32
- stf.spill [temp2]=f17,32
- ;;
- stf.spill [temp1]=f18,32
- stf.spill [temp2]=f19,32
- ;;
- stf.spill [temp1]=f20,32
- stf.spill [temp2]=f21,32
- ;;
- stf.spill [temp1]=f22,32
- stf.spill [temp2]=f23,32
- ;;
- stf.spill [temp1]=f24,32
- stf.spill [temp2]=f25,32
- ;;
- stf.spill [temp1]=f26,32
- stf.spill [temp2]=f27,32
- ;;
- stf.spill [temp1]=f28,32
- stf.spill [temp2]=f29,32
- ;;
- stf.spill [temp1]=f30,SW(B2)-SW(F30)
- stf.spill [temp2]=f31,SW(B3)-SW(F31)
- mov temp3=b2
- mov temp4=b3
- ;;
- st8 [temp1]=temp3,16 // save b2
- st8 [temp2]=temp4,16 // save b3
- mov temp3=b4
- mov temp4=b5
- ;;
- st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4
- st8 [temp2]=temp4 // save b5
- mov temp3=ar.lc
- ;;
- st8 [temp1]=temp3 // save ar.lc
-
- // FIXME: Some proms are incorrectly accessing the minstate area as
- // cached data. The C code uses region 6, uncached virtual. Ensure
- // that there is no cache data lying around for the first 1K of the
- // minstate area.
-	// Remove this code in September 2006, which gives platforms a year to
- // fix their proms and get their customers updated.
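-	// (Editorial sketch of the arithmetic: each fc below flushes one
-	// cache line, assumed to be 32 bytes here; 8 fc per pass x 4 passes
-	// x 32 bytes = 1024 bytes, exactly the first 1K mentioned above.)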
-
- add r1=32*1,r17
- add r2=32*2,r17
- add r3=32*3,r17
- add r4=32*4,r17
- add r5=32*5,r17
- add r6=32*6,r17
- add r7=32*7,r17
- ;;
- fc r17
- fc r1
- fc r2
- fc r3
- fc r4
- fc r5
- fc r6
- fc r7
- add r17=32*8,r17
- add r1=32*8,r1
- add r2=32*8,r2
- add r3=32*8,r3
- add r4=32*8,r4
- add r5=32*8,r5
- add r6=32*8,r6
- add r7=32*8,r7
- ;;
- fc r17
- fc r1
- fc r2
- fc r3
- fc r4
- fc r5
- fc r6
- fc r7
- add r17=32*8,r17
- add r1=32*8,r1
- add r2=32*8,r2
- add r3=32*8,r3
- add r4=32*8,r4
- add r5=32*8,r5
- add r6=32*8,r6
- add r7=32*8,r7
- ;;
- fc r17
- fc r1
- fc r2
- fc r3
- fc r4
- fc r5
- fc r6
- fc r7
- add r17=32*8,r17
- add r1=32*8,r1
- add r2=32*8,r2
- add r3=32*8,r3
- add r4=32*8,r4
- add r5=32*8,r5
- add r6=32*8,r6
- add r7=32*8,r7
- ;;
- fc r17
- fc r1
- fc r2
- fc r3
- fc r4
- fc r5
- fc r6
- fc r7
-
- br.sptk b0
-
-//EndStub//////////////////////////////////////////////////////////////////////
-
-
-//++
-// Name:
-// ia64_state_restore()
-//
-// Stub Description:
-//
-// Restore the SAL/OS state. This is sensitive to the layout of struct
-// ia64_sal_os_state in mca.h.
-//
-// r2 contains the return address, r3 contains either
-// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
-//
-// In addition to the SAL to OS state, this routine restores all the
-// registers that appear in struct pt_regs and struct switch_stack,
-// excluding those in the PAL minstate area.
-//
-//--
-
-ia64_state_restore:
- // Restore the switch_stack data that is not in minstate nor pt_regs.
- add regs=MCA_SWITCH_STACK_OFFSET, r3
- mov b0=r2 // save return address
- ;;
- GET_IA64_MCA_DATA(temp2)
- ;;
- add regs=temp2, regs
- ;;
- add temp1=SW(F2), regs
- add temp2=SW(F3), regs
- ;;
- ldf.fill f2=[temp1],32
- ldf.fill f3=[temp2],32
- ;;
- ldf.fill f4=[temp1],32
- ldf.fill f5=[temp2],32
- ;;
- ldf.fill f12=[temp1],32
- ldf.fill f13=[temp2],32
- ;;
- ldf.fill f14=[temp1],32
- ldf.fill f15=[temp2],32
- ;;
- ldf.fill f16=[temp1],32
- ldf.fill f17=[temp2],32
- ;;
- ldf.fill f18=[temp1],32
- ldf.fill f19=[temp2],32
- ;;
- ldf.fill f20=[temp1],32
- ldf.fill f21=[temp2],32
- ;;
- ldf.fill f22=[temp1],32
- ldf.fill f23=[temp2],32
- ;;
- ldf.fill f24=[temp1],32
- ldf.fill f25=[temp2],32
- ;;
- ldf.fill f26=[temp1],32
- ldf.fill f27=[temp2],32
- ;;
- ldf.fill f28=[temp1],32
- ldf.fill f29=[temp2],32
- ;;
- ldf.fill f30=[temp1],SW(B2)-SW(F30)
- ldf.fill f31=[temp2],SW(B3)-SW(F31)
- ;;
- ld8 temp3=[temp1],16 // restore b2
- ld8 temp4=[temp2],16 // restore b3
- ;;
- mov b2=temp3
- mov b3=temp4
- ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4
- ld8 temp4=[temp2] // restore b5
- ;;
- mov b4=temp3
- mov b5=temp4
- ld8 temp3=[temp1] // restore ar.lc
- ;;
- mov ar.lc=temp3
-
- // Restore the pt_regs data that is not in minstate. The previous code
- // left regs at switch_stack.
- add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
- ;;
- add temp1=PT(B6), regs
- add temp2=PT(B7), regs
- ;;
- ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6
- ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7
- ;;
- mov b6=temp3
- mov b7=temp4
- ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
- ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
- ;;
- mov ar.csd=temp3
- mov ar.ssd=temp4
- ld8 temp3=[temp1] // restore ar.unat
- add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
- ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
- ;;
- mov ar.unat=temp3
- mov ar.pfs=temp4
-	// ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack.
- ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
- ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
- ;;
- mov ar.ccv=temp3
- mov ar.fpsr=temp4
- ldf.fill f6=[temp1],PT(F8)-PT(F6)
- ldf.fill f7=[temp2],PT(F9)-PT(F7)
- ;;
- ldf.fill f8=[temp1],PT(F10)-PT(F8)
- ldf.fill f9=[temp2],PT(F11)-PT(F9)
- ;;
- ldf.fill f10=[temp1]
- ldf.fill f11=[temp2]
-
- // Restore the SAL to OS state. The previous code left regs at pt_regs.
- add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
- ;;
- add temp1=SOS(SAL_RA), regs
- add temp2=SOS(SAL_GP), regs
- ;;
- ld8 r12=[temp1],16 // sal_ra
- ld8 r9=[temp2],16 // sal_gp
- ;;
- ld8 r22=[temp1],16 // pal_min_state, virtual
- ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
- ;;
- ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
- ld8 r20=[temp2],16 // prev_task
- ;;
- ld8 temp3=[temp1],16 // cr.isr
- ld8 temp4=[temp2],16 // cr.ifa
- ;;
- mov cr.isr=temp3
- mov cr.ifa=temp4
- ld8 temp3=[temp1],16 // cr.itir
- ld8 temp4=[temp2],16 // cr.iipa
- ;;
- mov cr.itir=temp3
- mov cr.iipa=temp4
- ld8 temp3=[temp1] // cr.iim
- ld8 temp4=[temp2] // cr.iha
- add temp1=SOS(OS_STATUS), regs
- add temp2=SOS(CONTEXT), regs
- ;;
- mov cr.iim=temp3
- mov cr.iha=temp4
- dep r22=0,r22,62,1 // pal_min_state, physical, uncached
- mov IA64_KR(CURRENT)=r13
- ld8 r8=[temp1] // os_status
- ld8 r10=[temp2] // context
-
- /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
- * avoid any dependencies on the algorithm in ia64_switch_to(), just
- * purge any existing CURRENT_STACK mapping and insert the new one.
- *
- * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
- * prev_IA64_KR_CURRENT, these values may have been changed by the C
- * code. Do not use r8, r9, r10, r22, they contain values ready for
- * the return to SAL.
- */
-
- mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
- ;;
- shl r15=r15,IA64_GRANULE_SHIFT
- ;;
- dep r15=-1,r15,61,3 // virtual granule
- mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
- ;;
- ptr.d r15,r18
- ;;
- srlz.d
-
- extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
- shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
- movl r21=PAGE_KERNEL // page properties
- ;;
- mov IA64_KR(CURRENT_STACK)=r16
- cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
- or r21=r20,r21 // construct PA | page properties
-(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
- ;;
- mov cr.itir=r18
- mov cr.ifa=r13
- mov r20=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r20]=r21
- ;;
- srlz.d
-1:
-
- br.sptk b0
-
-//EndStub//////////////////////////////////////////////////////////////////////
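(Aside: the TR rewiring above, purge any existing IA64_TR_CURRENT_STACK
translation, then insert the new one, reads roughly like the following C
sketch; purge_dtr() and insert_dtr() are hypothetical stand-ins for the
ptr.d/srlz.d and itr.d sequences.)

extern void purge_dtr(unsigned long va, unsigned long itir_ps);       /* hypothetical */
extern void insert_dtr(int tr, unsigned long va, unsigned long pte,
		       unsigned long itir_ps);                        /* hypothetical */

/* Hedged sketch of wiring IA64_TR_CURRENT_STACK for the resumed stack. */
static void wire_current_stack(unsigned long stack_va, unsigned long stack_pa)
{
	unsigned long itir_ps = IA64_GRANULE_SHIFT << 2;	/* cr.itir.ps field */

	purge_dtr(stack_va, itir_ps);				/* ptr.d + srlz.d */
	insert_dtr(IA64_TR_CURRENT_STACK, stack_va,
		   stack_pa | pgprot_val(PAGE_KERNEL),		/* PA | page properties */
		   itir_ps);					/* itr.d dtr[...] */
}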
-
-
-//++
-// Name:
-// ia64_new_stack()
-//
-// Stub Description:
-//
-// Switch to the MCA/INIT stack.
-//
-// r2 contains the return address, r3 contains either
-// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
-//
-// On entry RBS is still on the original stack; this routine switches RBS
-// to use the MCA/INIT stack.
-//
-// On entry, sos->pal_min_state is physical, on exit it is virtual.
-//
-//--
-
-ia64_new_stack:
- add regs=MCA_PT_REGS_OFFSET, r3
- add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
- mov b0=r2 // save return address
- GET_IA64_MCA_DATA(temp1)
- invala
- ;;
- add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
- add regs=regs, temp1 // struct pt_regs on MCA or INIT stack
- ;;
- // Address of minstate area provided by PAL is physical, uncacheable.
- // Convert to Linux virtual address in region 6 for C code.
- ld8 ms=[temp2] // pal_min_state, physical
- ;;
- dep temp1=-1,ms,62,2 // set region 6
- mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
- ;;
- st8 [temp2]=temp1 // pal_min_state, virtual
-
- add temp4=temp3, regs // start of bspstore on new stack
- ;;
- mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack
- ;;
- flushrs // must be first in group
- br.sptk b0
-
-//EndStub//////////////////////////////////////////////////////////////////////
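(Aside: the physical-to-virtual conversion above is pure bit surgery on the
region bits; a minimal sketch, assuming the usual ia64 region layout where
bits 63:61 select the region and region 6 is the uncached mapping.)

/* Hedged sketch: forcing the region bits (63:61) of the physical
 * minstate address to 6 yields its uncached virtual alias, which is
 * what "dep temp1=-1,ms,62,2" does for a physical (region 0) input. */
#define RGN_SHIFT	61

static inline unsigned long pal_ms_phys_to_uc_virt(unsigned long phys)
{
	return phys | (6UL << RGN_SHIFT);	/* region bits -> 0b110 */
}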
-
-
-//++
-// Name:
-// ia64_old_stack()
-//
-// Stub Description:
-//
-// Switch to the old stack.
-//
-// r2 contains the return address, r3 contains either
-// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
-//
-// On entry, pal_min_state is virtual, on exit it is physical.
-//
-// On entry RBS is on the MCA/INIT stack; this routine switches RBS
-// back to the previous stack.
-//
-// The psr is set to all zeroes. SAL return requires either all zeroes or
-// just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
-// code does not perform correctly.
-//
-// The dirty registers at the time of the event were flushed to the
-// MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
-// before reverting to the previous bspstore.
-//--
-
-ia64_old_stack:
- add regs=MCA_PT_REGS_OFFSET, r3
- mov b0=r2 // save return address
- GET_IA64_MCA_DATA(temp2)
- LOAD_PHYSICAL(p0,temp1,1f)
- ;;
- mov cr.ipsr=r0
- mov cr.ifs=r0
- mov cr.iip=temp1
- ;;
- invala
- rfi
-1:
-
- add regs=regs, temp2 // struct pt_regs on MCA or INIT stack
- ;;
- add temp1=PT(LOADRS), regs
- ;;
- ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs
- ;;
- ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore
- mov ar.rsc=temp2
- ;;
- loadrs
- ld8 temp4=[temp1] // restore ar.rnat
- ;;
- mov ar.bspstore=temp3 // back to old stack
- ;;
- mov ar.rnat=temp4
- ;;
-
- br.sptk b0
-
-//EndStub//////////////////////////////////////////////////////////////////////
-
-
-//++
-// Name:
-// ia64_set_kernel_registers()
-//
-// Stub Description:
-//
-// Set the registers that are required by the C code in order to run on an
-// MCA/INIT stack.
-//
-// r2 contains the return address, r3 contains either
-// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
-//
-//--
-
-ia64_set_kernel_registers:
- add temp3=MCA_SP_OFFSET, r3
- mov b0=r2 // save return address
- GET_IA64_MCA_DATA(temp1)
- ;;
- add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
- add r13=temp1, r3 // set current to start of MCA/INIT stack
- add r20=temp1, r3 // physical start of MCA/INIT stack
- ;;
- DATA_PA_TO_VA(r12,temp2)
- DATA_PA_TO_VA(r13,temp3)
- ;;
- mov IA64_KR(CURRENT)=r13
-
- /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid
- * any dependencies on the algorithm in ia64_switch_to(), just purge
- * any existing CURRENT_STACK mapping and insert the new one.
- */
-
- mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- ;;
- dep r16=-1,r16,61,3 // virtual granule
- mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
- ;;
- ptr.d r16,r18
- ;;
- srlz.d
-
- shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack
- movl r21=PAGE_KERNEL // page properties
- ;;
- mov IA64_KR(CURRENT_STACK)=r16
- or r21=r20,r21 // construct PA | page properties
- ;;
- mov cr.itir=r18
- mov cr.ifa=r13
- mov r20=IA64_TR_CURRENT_STACK
-
- movl r17=FPSR_DEFAULT
- ;;
- mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
- ;;
- itr.d dtr[r20]=r21
- ;;
- srlz.d
-
- br.sptk b0
-
-//EndStub//////////////////////////////////////////////////////////////////////
-
-#undef ms
-#undef regs
-#undef temp1
-#undef temp2
-#undef temp3
-#undef temp4
-
-
-// Support function for mca.c, it is here to avoid using inline asm. Given the
-// address of an rnat slot, if that address is below the current ar.bspstore
-// then return the contents of that slot, otherwise return the contents of
-// ar.rnat.
-GLOBAL_ENTRY(ia64_get_rnat)
- alloc r14=ar.pfs,1,0,0,0
- mov ar.rsc=0
- ;;
- mov r14=ar.bspstore
- ;;
- cmp.lt p6,p7=in0,r14
- ;;
-(p6) ld8 r8=[in0]
-(p7) mov r8=ar.rnat
- mov ar.rsc=3
- br.ret.sptk.many rp
-END(ia64_get_rnat)
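(In C terms the stub behaves roughly as below; read_bspstore() and
read_rnat() are hypothetical wrappers for the ar.bspstore and ar.rnat
reads.)

extern unsigned long *read_bspstore(void);	/* hypothetical ar.bspstore read */
extern unsigned long read_rnat(void);		/* hypothetical ar.rnat read */

/* Hedged C rendering of ia64_get_rnat: a slot below ar.bspstore has
 * already been flushed to memory; otherwise its value is still held
 * in the ar.rnat register. */
unsigned long get_rnat_sketch(unsigned long *rnat_slot)
{
	if (rnat_slot < read_bspstore())
		return *rnat_slot;		/* slot already in memory */
	return read_rnat();			/* still live in ar.rnat */
}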
-
-
-// void ia64_set_psr_mc(void)
-//
-// Set psr.mc bit to mask MCA/INIT.
-GLOBAL_ENTRY(ia64_set_psr_mc)
- rsm psr.i | psr.ic // disable interrupts
- ;;
- srlz.d
- ;;
- mov r14 = psr // get psr{36:35,31:0}
- movl r15 = 1f
- ;;
- dep r14 = -1, r14, PSR_MC, 1 // set psr.mc
- ;;
- dep r14 = -1, r14, PSR_IC, 1 // set psr.ic
- ;;
- dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use
- ;;
- mov cr.ipsr = r14
- mov cr.ifs = r0
- mov cr.iip = r15
- ;;
- rfi
-1:
- br.ret.sptk.many rp
-END(ia64_set_psr_mc)
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
deleted file mode 100644
index 23c203639a..0000000000
--- a/arch/ia64/kernel/mca_drv.c
+++ /dev/null
@@ -1,796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * File: mca_drv.c
- * Purpose: Generic MCA handling layer
- *
- * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- * Copyright (C) 2005 Silicon Graphics, Inc
- * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
- * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
- */
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kallsyms.h>
-#include <linux/memblock.h>
-#include <linux/acpi.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/workqueue.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <asm/delay.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-
-#include "mca_drv.h"
-
-/* max size of SAL error record (default) */
-static int sal_rec_max = 10000;
-
-/* from mca_drv_asm.S */
-extern void *mca_handler_bhhook(void);
-
-static DEFINE_SPINLOCK(mca_bh_lock);
-
-typedef enum {
- MCA_IS_LOCAL = 0,
- MCA_IS_GLOBAL = 1
-} mca_type_t;
-
-#define MAX_PAGE_ISOLATE 1024
-
-static struct page *page_isolate[MAX_PAGE_ISOLATE];
-static int num_page_isolate = 0;
-
-typedef enum {
- ISOLATE_NG,
- ISOLATE_OK,
- ISOLATE_NONE
-} isolate_status_t;
-
-typedef enum {
- MCA_NOT_RECOVERED = 0,
- MCA_RECOVERED = 1
-} recovery_status_t;
-
-/*
- * This pool keeps pointers to the section part of SAL error record
- */
-static struct {
- slidx_list_t *buffer; /* section pointer list pool */
- int cur_idx; /* Current index of section pointer list pool */
- int max_idx; /* Maximum index of section pointer list pool */
-} slidx_pool;
-
-static int
-fatal_mca(const char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- ia64_mca_printk(KERN_ALERT "MCA: %s\n", buf);
-
- return MCA_NOT_RECOVERED;
-}
-
-static int
-mca_recovered(const char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- ia64_mca_printk(KERN_INFO "MCA: %s\n", buf);
-
- return MCA_RECOVERED;
-}
-
-/**
- * mca_page_isolate - isolate a poisoned page in order not to use it later
- * @paddr: poisoned memory location
- *
- * Return value:
- * one of isolate_status_t, ISOLATE_OK/NG/NONE.
- */
-
-static isolate_status_t
-mca_page_isolate(unsigned long paddr)
-{
- int i;
- struct page *p;
-
-	/* check whether the physical address is valid */
- if (!ia64_phys_addr_valid(paddr))
- return ISOLATE_NONE;
-
- if (!pfn_valid(paddr >> PAGE_SHIFT))
- return ISOLATE_NONE;
-
-	/* convert the physical address to a struct page pointer */
- p = pfn_to_page(paddr>>PAGE_SHIFT);
-
-	/* check whether this page has already been registered */
- for (i = 0; i < num_page_isolate; i++)
- if (page_isolate[i] == p)
- return ISOLATE_OK; /* already listed */
-
- /* limitation check */
- if (num_page_isolate == MAX_PAGE_ISOLATE)
- return ISOLATE_NG;
-
-	/* reject pages with the 'Slab' or 'Reserved' attribute */
- if (PageSlab(p) || PageReserved(p))
- return ISOLATE_NG;
-
- /* add attribute 'Reserved' and register the page */
- get_page(p);
- SetPageReserved(p);
- page_isolate[num_page_isolate++] = p;
-
- return ISOLATE_OK;
-}
-
-/**
- * mca_handler_bh - Kill the process that hit a memory read error
- * @paddr: poisoned address received from MCA Handler
- * @iip: interrupted instruction pointer
- * @ipsr: interrupted processor status register
- */
-
-void
-mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
-{
- ia64_mlogbuf_dump();
- printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
- "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
- raw_smp_processor_id(), current->pid,
- from_kuid(&init_user_ns, current_uid()),
- iip, ipsr, paddr, current->comm);
-
- spin_lock(&mca_bh_lock);
- switch (mca_page_isolate(paddr)) {
- case ISOLATE_OK:
- printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
- break;
- case ISOLATE_NG:
- printk(KERN_CRIT "Page isolation: ( %lx ) failure.\n", paddr);
- break;
- default:
- break;
- }
- spin_unlock(&mca_bh_lock);
-
- /* This process is about to be killed itself */
- make_task_dead(SIGKILL);
-}
-
-/**
- * mca_make_peidx - Make index of processor error section
- * @slpi: pointer to record of processor error section
- * @peidx: pointer to index of processor error section
- */
-
-static void
-mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
-{
- /*
- * calculate the start address of
- * "struct cpuid_info" and "sal_processor_static_info_t".
- */
- u64 total_check_num = slpi->valid.num_cache_check
- + slpi->valid.num_tlb_check
- + slpi->valid.num_bus_check
- + slpi->valid.num_reg_file_check
- + slpi->valid.num_ms_check;
- u64 head_size = sizeof(sal_log_mod_error_info_t) * total_check_num
- + sizeof(sal_log_processor_info_t);
- u64 mid_size = slpi->valid.cpuid_info * sizeof(struct sal_cpuid_info);
-
- peidx_head(peidx) = slpi;
- peidx_mid(peidx) = (struct sal_cpuid_info *)
- (slpi->valid.cpuid_info ? ((char*)slpi + head_size) : NULL);
- peidx_bottom(peidx) = (sal_processor_static_info_t *)
- (slpi->valid.psi_static_struct ?
- ((char*)slpi + head_size + mid_size) : NULL);
-}
-
-/**
- * mca_make_slidx - Make index of SAL error record
- * @buffer: pointer to SAL error record
- * @slidx: pointer to index of SAL error record
- *
- * Return value:
- * 1 if record has platform error / 0 if not
- */
-#define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
- {slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
- hl->hdr = ptr; \
- list_add(&hl->list, &(sect)); \
- slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
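(A hedged usage note on the macro above.)

/*
 * Usage note (hedged): each invocation hands out the next slidx_list_t
 * from the preallocated pool and links it onto the given section list;
 * cur_idx wraps modulo max_idx, which is safe because the pool is sized
 * for two complete SAL records (see init_record_index_pools()).
 */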
-
-static int
-mca_make_slidx(void *buffer, slidx_table_t *slidx)
-{
- int platform_err = 0;
- int record_len = ((sal_log_record_header_t*)buffer)->len;
- u32 ercd_pos;
- int sects;
- sal_log_section_hdr_t *sp;
-
- /*
-	 * Initialize the index referring to the current record
- */
- INIT_LIST_HEAD(&(slidx->proc_err));
- INIT_LIST_HEAD(&(slidx->mem_dev_err));
- INIT_LIST_HEAD(&(slidx->sel_dev_err));
- INIT_LIST_HEAD(&(slidx->pci_bus_err));
- INIT_LIST_HEAD(&(slidx->smbios_dev_err));
- INIT_LIST_HEAD(&(slidx->pci_comp_err));
- INIT_LIST_HEAD(&(slidx->plat_specific_err));
- INIT_LIST_HEAD(&(slidx->host_ctlr_err));
- INIT_LIST_HEAD(&(slidx->plat_bus_err));
- INIT_LIST_HEAD(&(slidx->unsupported));
-
- /*
- * Extract a Record Header
- */
- slidx->header = buffer;
-
- /*
-	 * Extract the records of each section
-	 * (arranged following "int ia64_log_platform_info_print()")
- */
- for (ercd_pos = sizeof(sal_log_record_header_t), sects = 0;
- ercd_pos < record_len; ercd_pos += sp->len, sects++) {
- sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
- if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
- LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
- } else if (!efi_guidcmp(sp->guid,
- SAL_PLAT_BUS_ERR_SECT_GUID)) {
- platform_err = 1;
- LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
- } else {
- LOG_INDEX_ADD_SECT_PTR(slidx->unsupported, sp);
- }
- }
- slidx->n_sections = sects;
-
- return platform_err;
-}
-
-/**
- * init_record_index_pools - Initialize pool of lists for SAL record index
- *
- * Return value:
- * 0 on Success / -ENOMEM on Failure
- */
-static int
-init_record_index_pools(void)
-{
- int i;
- int rec_max_size; /* Maximum size of SAL error records */
- int sect_min_size; /* Minimum size of SAL error sections */
- /* minimum size table of each section */
- static int sal_log_sect_min_sizes[] = {
- sizeof(sal_log_processor_info_t)
- + sizeof(sal_processor_static_info_t),
- sizeof(sal_log_mem_dev_err_info_t),
- sizeof(sal_log_sel_dev_err_info_t),
- sizeof(sal_log_pci_bus_err_info_t),
- sizeof(sal_log_smbios_dev_err_info_t),
- sizeof(sal_log_pci_comp_err_info_t),
- sizeof(sal_log_plat_specific_err_info_t),
- sizeof(sal_log_host_ctlr_err_info_t),
- sizeof(sal_log_plat_bus_err_info_t),
- };
-
- /*
-	 * The MCA handler cannot allocate new memory on the fly,
-	 * so we preallocate enough memory to handle a SAL record.
-	 *
-	 * Initialize a handling set of slidx_pool:
-	 * 1. Pick up the max size of SAL error records
-	 * 2. Pick up the min size of SAL error sections
-	 * 3. Allocate a pool large enough for 2 SAL records
-	 *    (now we can estimate the maximum number of sections in a record.)
- */
-
- /* - 1 - */
- rec_max_size = sal_rec_max;
-
- /* - 2 - */
- sect_min_size = sal_log_sect_min_sizes[0];
- for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
- if (sect_min_size > sal_log_sect_min_sizes[i])
- sect_min_size = sal_log_sect_min_sizes[i];
-
- /* - 3 - */
- slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
- slidx_pool.buffer =
- kmalloc_array(slidx_pool.max_idx, sizeof(slidx_list_t),
- GFP_KERNEL);
-
- return slidx_pool.buffer ? 0 : -ENOMEM;
-}
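(A worked example of the sizing rule, using a hypothetical smallest-section
size.)

/*
 * With the default sal_rec_max of 10000 bytes and a smallest section of,
 * say, 80 bytes (hypothetical figure), the pool gets
 *	(10000 / 80) * 2 + 1 = 251
 * slidx_list_t entries: enough to index two complete SAL records plus
 * one spare slot for the wrap-around in LOG_INDEX_ADD_SECT_PTR.
 */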
-
-
-/*****************************************************************************
- * Recovery functions *
- *****************************************************************************/
-
-/**
- * is_mca_global - Check whether this MCA is global or not
- * @peidx: pointer of index of processor error section
- * @pbci: pointer to pal_bus_check_info_t
- * @sos: pointer to hand off struct between SAL and OS
- *
- * Return value:
- * MCA_IS_LOCAL / MCA_IS_GLOBAL
- */
-
-static mca_type_t
-is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
- struct ia64_sal_os_state *sos)
-{
- pal_processor_state_info_t *psp =
- (pal_processor_state_info_t*)peidx_psp(peidx);
-
- /*
-	 * PAL requests a rendezvous if the MCA has global scope.
-	 * If the "rz_always" flag is set, SAL requests an MCA rendezvous
-	 * regardless of the MCA's scope.
-	 * Therefore the MCA is local when no rendezvous has been requested.
-	 * If the rendezvous failed, the system must be brought down.
- */
- switch (sos->rv_rc) {
- case -1: /* SAL rendezvous unsuccessful */
- return MCA_IS_GLOBAL;
- case 0: /* SAL rendezvous not required */
- return MCA_IS_LOCAL;
- case 1: /* SAL rendezvous successful int */
- case 2: /* SAL rendezvous successful int with init */
- default:
- break;
- }
-
- /*
-	 * If one or more Cache/TLB/Reg_File/Uarch checks are present,
-	 * it would be a local MCA. (i.e. a processor-internal error)
- */
- if (psp->tc || psp->cc || psp->rc || psp->uc)
- return MCA_IS_LOCAL;
-
- /*
- * Bus_Check structure with Bus_Check.ib (internal bus error) flag set
- * would be a global MCA. (e.g. a system bus address parity error)
- */
- if (!pbci || pbci->ib)
- return MCA_IS_GLOBAL;
-
- /*
- * Bus_Check structure with Bus_Check.eb (external bus error) flag set
- * could be either a local MCA or a global MCA.
- *
-	 * Referring to Bus_Check.bsi:
- * 0: Unknown/unclassified
- * 1: BERR#
- * 2: BINIT#
- * 3: Hard Fail
- * (FIXME: Are these SGI specific or generic bsi values?)
- */
- if (pbci->eb)
- switch (pbci->bsi) {
- case 0:
- /* e.g. a load from poisoned memory */
- return MCA_IS_LOCAL;
- case 1:
- case 2:
- case 3:
- return MCA_IS_GLOBAL;
- }
-
- return MCA_IS_GLOBAL;
-}
-
-/**
- * get_target_identifier - Get the valid Cache or Bus check target identifier.
- * @peidx: pointer of index of processor error section
- *
- * Return value:
- * target address on Success / 0 on Failure
- */
-static u64
-get_target_identifier(peidx_table_t *peidx)
-{
- u64 target_address = 0;
- sal_log_mod_error_info_t *smei;
- pal_cache_check_info_t *pcci;
- int i, level = 9;
-
- /*
- * Look through the cache checks for a valid target identifier
- * If more than one valid target identifier, return the one
- * with the lowest cache level.
- */
- for (i = 0; i < peidx_cache_check_num(peidx); i++) {
- smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
- if (smei->valid.target_identifier && smei->target_identifier) {
- pcci = (pal_cache_check_info_t *)&(smei->check_info);
- if (!target_address || (pcci->level < level)) {
- target_address = smei->target_identifier;
- level = pcci->level;
- continue;
- }
- }
- }
- if (target_address)
- return target_address;
-
- /*
- * Look at the bus check for a valid target identifier
- */
- smei = peidx_bus_check(peidx, 0);
- if (smei && smei->valid.target_identifier)
- return smei->target_identifier;
-
- return 0;
-}
-
-/**
- * recover_from_read_error - Try to recover from errors whose type is "read".
- * @slidx: pointer of index of SAL error record
- * @peidx: pointer of index of processor error section
- * @pbci: pointer of pal_bus_check_info
- * @sos: pointer to hand off struct between SAL and OS
- *
- * Return value:
- * 1 on Success / 0 on Failure
- */
-
-static int
-recover_from_read_error(slidx_table_t *slidx,
- peidx_table_t *peidx, pal_bus_check_info_t *pbci,
- struct ia64_sal_os_state *sos)
-{
- u64 target_identifier;
- struct pal_min_state_area *pmsa;
- struct ia64_psr *psr1, *psr2;
- ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
-
- /* Is target address valid? */
- target_identifier = get_target_identifier(peidx);
- if (!target_identifier)
- return fatal_mca("target address not valid");
-
- /*
- * cpu read or memory-mapped io read
- *
- * offending process affected process OS MCA do
- * kernel mode kernel mode down system
- * kernel mode user mode kill the process
- * user mode kernel mode down system (*)
- * user mode user mode kill the process
- *
-	 * (*) You could terminate the offending user-mode process
-	 *     if (pbci->pv && pbci->pl != 0) *and* if you are sure
-	 *     the process holds no kernel locks.
- */
-
- /* Is minstate valid? */
- if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
- return fatal_mca("minstate not valid");
- psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
- psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
-
- /*
- * Check the privilege level of interrupted context.
- * If it is user-mode, then terminate affected process.
- */
-
- pmsa = sos->pal_min_state;
- if (psr1->cpl != 0 ||
- ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
- /*
- * setup for resume to bottom half of MCA,
- * "mca_handler_bhhook"
- */
- /* pass to bhhook as argument (gr8, ...) */
- pmsa->pmsa_gr[8-1] = target_identifier;
- pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
- pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
- /* set interrupted return address (but no use) */
- pmsa->pmsa_br0 = pmsa->pmsa_iip;
- /* change resume address to bottom half */
- pmsa->pmsa_iip = mca_hdlr_bh->fp;
- pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
- /* set cpl with kernel mode */
- psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
- psr2->cpl = 0;
- psr2->ri = 0;
- psr2->bn = 1;
- psr2->i = 0;
-
- return mca_recovered("user memory corruption. "
- "kill affected process - recovered.");
- }
-
- return fatal_mca("kernel context not recovered, iip 0x%lx\n",
- pmsa->pmsa_iip);
-}
-
-/**
- * recover_from_platform_error - Recover from platform error.
- * @slidx: pointer of index of SAL error record
- * @peidx: pointer of index of processor error section
- * @pbci: pointer of pal_bus_check_info
- * @sos: pointer to hand off struct between SAL and OS
- *
- * Return value:
- * 1 on Success / 0 on Failure
- */
-
-static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
- pal_bus_check_info_t *pbci,
- struct ia64_sal_os_state *sos)
-{
- int status = 0;
- pal_processor_state_info_t *psp =
- (pal_processor_state_info_t*)peidx_psp(peidx);
-
- if (psp->bc && pbci->eb && pbci->bsi == 0) {
- switch(pbci->type) {
- case 1: /* partial read */
- case 3: /* full line(cpu) read */
- case 9: /* I/O space read */
- status = recover_from_read_error(slidx, peidx, pbci,
- sos);
- break;
- case 0: /* unknown */
- case 2: /* partial write */
- case 4: /* full line write */
- case 5: /* implicit or explicit write-back operation */
- case 6: /* snoop probe */
- case 7: /* incoming or outgoing ptc.g */
- case 8: /* write coalescing transactions */
- case 10: /* I/O space write */
- case 11: /* inter-processor interrupt message(IPI) */
- case 12: /* interrupt acknowledge or
- external task priority cycle */
- default:
- break;
- }
- } else if (psp->cc && !psp->bc) { /* Cache error */
- status = recover_from_read_error(slidx, peidx, pbci, sos);
- }
-
- return status;
-}
-
-/*
- * recover_from_tlb_check
- * @peidx: pointer of index of processor error section
- *
- * Return value:
- * 1 on Success / 0 on Failure
- */
-static int
-recover_from_tlb_check(peidx_table_t *peidx)
-{
- sal_log_mod_error_info_t *smei;
- pal_tlb_check_info_t *ptci;
-
- smei = (sal_log_mod_error_info_t *)peidx_tlb_check(peidx, 0);
- ptci = (pal_tlb_check_info_t *)&(smei->check_info);
-
- /*
- * Look for signature of a duplicate TLB DTC entry, which is
- * a SW bug and always fatal.
- */
- if (ptci->op == PAL_TLB_CHECK_OP_PURGE
- && !(ptci->itr || ptci->dtc || ptci->itc))
- return fatal_mca("Duplicate TLB entry");
-
- return mca_recovered("TLB check recovered");
-}
-
-/**
- * recover_from_processor_error
- * @platform: whether any platform error sections are present
- * @slidx: pointer of index of SAL error record
- * @peidx: pointer of index of processor error section
- * @pbci: pointer of pal_bus_check_info
- * @sos: pointer to hand off struct between SAL and OS
- *
- * Return value:
- * 1 on Success / 0 on Failure
- */
-
-static int
-recover_from_processor_error(int platform, slidx_table_t *slidx,
- peidx_table_t *peidx, pal_bus_check_info_t *pbci,
- struct ia64_sal_os_state *sos)
-{
- pal_processor_state_info_t *psp =
- (pal_processor_state_info_t*)peidx_psp(peidx);
-
- /*
- * Processor recovery status must key off of the PAL recovery
- * status in the Processor State Parameter.
- */
-
- /*
- * The machine check is corrected.
- */
- if (psp->cm == 1)
- return mca_recovered("machine check is already corrected.");
-
- /*
- * The error was not contained. Software must be reset.
- */
- if (psp->us || psp->ci == 0)
- return fatal_mca("error not contained");
-
- /*
- * Look for recoverable TLB check
- */
- if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
- return recover_from_tlb_check(peidx);
-
- /*
- * The cache check and bus check bits have four possible states
- * cc bc
- * 1 1 Memory error, attempt recovery
- * 1 0 Cache error, attempt recovery
- * 0 1 I/O error, attempt recovery
- * 0 0 Other error type, not recovered
- */
- if (psp->cc == 0 && (psp->bc == 0 || pbci == NULL))
- return fatal_mca("No cache or bus check");
-
- /*
- * Cannot handle more than one bus check.
- */
- if (peidx_bus_check_num(peidx) > 1)
- return fatal_mca("Too many bus checks");
-
- if (pbci->ib)
- return fatal_mca("Internal Bus error");
- if (pbci->eb && pbci->bsi > 0)
- return fatal_mca("External bus check fatal status");
-
- /*
- * This is a local MCA and estimated as a recoverable error.
- */
- if (platform)
- return recover_from_platform_error(slidx, peidx, pbci, sos);
-
- /*
-	 * We cannot recover on account of a strange SAL error record.
- */
- return fatal_mca("Strange SAL record");
-}
-
-/**
- * mca_try_to_recover - Try to recover from MCA
- * @rec: pointer to a SAL error record
- * @sos: pointer to hand off struct between SAL and OS
- *
- * Return value:
- * 1 on Success / 0 on Failure
- */
-
-static int
-mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
-{
- int platform_err;
- int n_proc_err;
- slidx_table_t slidx;
- peidx_table_t peidx;
- pal_bus_check_info_t pbci;
-
- /* Make index of SAL error record */
- platform_err = mca_make_slidx(rec, &slidx);
-
- /* Count processor error sections */
- n_proc_err = slidx_count(&slidx, proc_err);
-
-	/* The OS can recover only when there is exactly one processor error section */
- if (n_proc_err > 1)
- return fatal_mca("Too Many Errors");
- else if (n_proc_err == 0)
- /* Weird SAL record ... We can't do anything */
- return fatal_mca("Weird SAL record");
-
- /* Make index of processor error section */
- mca_make_peidx((sal_log_processor_info_t*)
- slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
-
- /* Extract Processor BUS_CHECK[0] */
- *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
-
- /* Check whether MCA is global or not */
- if (is_mca_global(&peidx, &pbci, sos))
- return fatal_mca("global MCA");
-
- /* Try to recover a processor error */
- return recover_from_processor_error(platform_err, &slidx, &peidx,
- &pbci, sos);
-}
-
-/*
- * =============================================================================
- */
-
-int __init mca_external_handler_init(void)
-{
- if (init_record_index_pools())
- return -ENOMEM;
-
- /* register external mca handlers */
- if (ia64_reg_MCA_extension(mca_try_to_recover)) {
- printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
- kfree(slidx_pool.buffer);
- return -EFAULT;
- }
- return 0;
-}
-
-void __exit mca_external_handler_exit(void)
-{
- /* unregister external mca handlers */
- ia64_unreg_MCA_extension();
- kfree(slidx_pool.buffer);
-}
-
-module_init(mca_external_handler_init);
-module_exit(mca_external_handler_exit);
-
-module_param(sal_rec_max, int, 0644);
-MODULE_PARM_DESC(sal_rec_max, "Max size of SAL error record");
-
-MODULE_DESCRIPTION("ia64 platform dependent mca handler driver");
-MODULE_LICENSE("GPL");
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
deleted file mode 100644
index 45bc4e3ae1..0000000000
--- a/arch/ia64/kernel/mca_drv.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * File: mca_drv.h
- * Purpose: Define helpers for Generic MCA handling
- *
- * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- */
-/*
- * Processor error section:
- *
- * +-sal_log_processor_info_t *info-------------+
- * | sal_log_section_hdr_t header; |
- * | ... |
- * | sal_log_mod_error_info_t info[0]; |
- * +-+----------------+-------------------------+
- * | CACHE_CHECK | ^ num_cache_check v
- * +----------------+
- * | TLB_CHECK | ^ num_tlb_check v
- * +----------------+
- * | BUS_CHECK | ^ num_bus_check v
- * +----------------+
- * | REG_FILE_CHECK | ^ num_reg_file_check v
- * +----------------+
- * | MS_CHECK | ^ num_ms_check v
- * +-struct cpuid_info *id----------------------+
- * | regs[5]; |
- * | reserved; |
- * +-sal_processor_static_info_t *regs----------+
- * | valid; |
- * | ... |
- * | fr[128]; |
- * +--------------------------------------------+
- */
-
-/* peidx: index of processor error section */
-typedef struct peidx_table {
- sal_log_processor_info_t *info;
- struct sal_cpuid_info *id;
- sal_processor_static_info_t *regs;
-} peidx_table_t;
-
-#define peidx_head(p) (((p)->info))
-#define peidx_mid(p) (((p)->id))
-#define peidx_bottom(p) (((p)->regs))
-
-#define peidx_psp(p) (&(peidx_head(p)->proc_state_parameter))
-#define peidx_field_valid(p) (&(peidx_head(p)->valid))
-#define peidx_minstate_area(p) (&(peidx_bottom(p)->min_state_area))
-
-#define peidx_cache_check_num(p) (peidx_head(p)->valid.num_cache_check)
-#define peidx_tlb_check_num(p) (peidx_head(p)->valid.num_tlb_check)
-#define peidx_bus_check_num(p) (peidx_head(p)->valid.num_bus_check)
-#define peidx_reg_file_check_num(p) (peidx_head(p)->valid.num_reg_file_check)
-#define peidx_ms_check_num(p) (peidx_head(p)->valid.num_ms_check)
-
-#define peidx_cache_check_idx(p, n) (n)
-#define peidx_tlb_check_idx(p, n) (peidx_cache_check_idx(p, peidx_cache_check_num(p)) + n)
-#define peidx_bus_check_idx(p, n) (peidx_tlb_check_idx(p, peidx_tlb_check_num(p)) + n)
-#define peidx_reg_file_check_idx(p, n) (peidx_bus_check_idx(p, peidx_bus_check_num(p)) + n)
-#define peidx_ms_check_idx(p, n) (peidx_reg_file_check_idx(p, peidx_reg_file_check_num(p)) + n)
-
-#define peidx_mod_error_info(p, name, n) \
-({ int __idx = peidx_##name##_idx(p, n); \
- sal_log_mod_error_info_t *__ret = NULL; \
- if (peidx_##name##_num(p) > n) /*BUG*/ \
- __ret = &(peidx_head(p)->info[__idx]); \
- __ret; })
-
-#define peidx_cache_check(p, n) peidx_mod_error_info(p, cache_check, n)
-#define peidx_tlb_check(p, n) peidx_mod_error_info(p, tlb_check, n)
-#define peidx_bus_check(p, n) peidx_mod_error_info(p, bus_check, n)
-#define peidx_reg_file_check(p, n) peidx_mod_error_info(p, reg_file_check, n)
-#define peidx_ms_check(p, n) peidx_mod_error_info(p, ms_check, n)
-
-#define peidx_check_info(proc, name, n) \
-({ \
- sal_log_mod_error_info_t *__info = peidx_mod_error_info(proc, name, n);\
- u64 __temp = __info && __info->valid.check_info \
- ? __info->check_info : 0; \
- __temp; })
-
-/* slidx: index of SAL log error record */
-
-typedef struct slidx_list {
- struct list_head list;
- sal_log_section_hdr_t *hdr;
-} slidx_list_t;
-
-typedef struct slidx_table {
- sal_log_record_header_t *header;
- int n_sections; /* # of section headers */
- struct list_head proc_err;
- struct list_head mem_dev_err;
- struct list_head sel_dev_err;
- struct list_head pci_bus_err;
- struct list_head smbios_dev_err;
- struct list_head pci_comp_err;
- struct list_head plat_specific_err;
- struct list_head host_ctlr_err;
- struct list_head plat_bus_err;
- struct list_head unsupported; /* list of unsupported sections */
-} slidx_table_t;
-
-#define slidx_foreach_entry(pos, head) \
- list_for_each_entry(pos, head, list)
-#define slidx_first_entry(head) \
- (((head)->next != (head)) ? list_entry((head)->next, typeof(slidx_list_t), list) : NULL)
-#define slidx_count(slidx, sec) \
-({ int __count = 0; \
- slidx_list_t *__pos; \
- slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\
- __count; })
-
-struct mca_table_entry {
- int start_addr; /* location-relative starting address of MCA recoverable range */
- int end_addr; /* location-relative ending address of MCA recoverable range */
-};
-
-extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
-extern int mca_recover_range(unsigned long);
-extern void ia64_mlogbuf_dump(void);
-
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
deleted file mode 100644
index 4428f57bee..0000000000
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * File: mca_drv_asm.S
- * Purpose: Assembly portion of Generic MCA handling
- *
- * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- */
-#include <linux/threads.h>
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-
-GLOBAL_ENTRY(mca_handler_bhhook)
- invala // clear RSE ?
- cover
- ;;
- clrrrb
- ;;
- alloc r16=ar.pfs,0,2,3,0 // make a new frame
- mov ar.rsc=0
- mov r13=IA64_KR(CURRENT) // current task pointer
- ;;
- mov r2=r13
- ;;
- addl r22=IA64_RBS_OFFSET,r2
- ;;
- mov ar.bspstore=r22
- addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
- ;;
- adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
- st1 [r2]=r0 // clear current->thread.on_ustack flag
- mov loc0=r16
- movl loc1=mca_handler_bh // recovery C function
- ;;
- mov out0=r8 // poisoned address
- mov out1=r9 // iip
- mov out2=r10 // psr
- mov b6=loc1
- ;;
- mov loc1=rp
- ssm psr.ic
- ;;
- srlz.i
- ;;
- ssm psr.i
- br.call.sptk.many rp=b6 // does not return ...
- ;;
- mov ar.pfs=loc0
- mov rp=loc1
- ;;
- mov r8=r0
- br.ret.sptk.many rp
-END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
deleted file mode 100644
index d6eab2a108..0000000000
--- a/arch/ia64/kernel/minstate.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <asm/cache.h>
-
-#include "entry.h"
-#include <asm/native/inst.h>
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-/* read ar.itc in advance, and use it before leaving bank 0 */
-#define ACCOUNT_GET_STAMP \
-(pUStk) mov.m r20=ar.itc;
-#define ACCOUNT_SYS_ENTER \
-(pUStk) br.call.spnt rp=account_sys_enter \
- ;;
-#else
-#define ACCOUNT_GET_STAMP
-#define ACCOUNT_SYS_ENTER
-#endif
-
-.section ".data..patch.rse", "a"
-.previous
-
-/*
- * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-#define IA64_NATIVE_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
- mov r16=IA64_KR(CURRENT); /* M */ \
- mov r27=ar.rsc; /* M */ \
- mov r20=r1; /* A */ \
- mov r25=ar.unat; /* M */ \
- MOV_FROM_IPSR(p0,r29); /* M */ \
- mov r26=ar.pfs; /* I */ \
- MOV_FROM_IIP(r28); /* M */ \
- mov r21=ar.fpsr; /* M */ \
- __COVER; /* B;; (or nothing) */ \
- ;; \
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
- ;; \
- ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
- st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
- adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
- ;; \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
- ;; \
-(pUStk) mov.m r24=ar.rnat; \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
-(pKStk) mov r1=sp; /* get sp */ \
- ;; \
-(pUStk) lfetch.fault.excl.nt1 [r22]; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
- adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
- adds r16=PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16]=r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
- mov r29=b0 \
- ;; \
- WORKAROUND; \
- adds r16=PT(R8),r1; /* initialize first base pointer */ \
- adds r17=PT(R9),r1; /* initialize second base pointer */ \
-(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r8,16; \
-.mem.offset 8,0; st8.spill [r17]=r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r10,24; \
-.mem.offset 8,0; st8.spill [r17]=r11,24; \
- ;; \
- st8 [r16]=r28,16; /* save cr.iip */ \
- st8 [r17]=r30,16; /* save cr.ifs */ \
-(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
- mov r8=ar.ccv; \
- mov r9=ar.csd; \
- mov r10=ar.ssd; \
- movl r11=FPSR_DEFAULT; /* L-unit */ \
- ;; \
- st8 [r16]=r25,16; /* save ar.unat */ \
- st8 [r17]=r26,16; /* save ar.pfs */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r16]=r27,16; /* save ar.rsc */ \
-(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
-(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
- ;; /* avoid RAW on r16 & r17 */ \
-(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
- st8 [r17]=r31,16; /* save predicates */ \
-(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
- ;; \
- st8 [r16]=r29,16; /* save b0 */ \
- st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17]=r12,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r13,16; \
-.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
- mov r13=IA64_KR(CURRENT); /* establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r15,16; \
-.mem.offset 8,0; st8.spill [r17]=r14,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r2,16; \
-.mem.offset 8,0; st8.spill [r17]=r3,16; \
- ACCOUNT_GET_STAMP \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
- EXTRA; \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
- ACCOUNT_SYS_ENTER \
- bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
- ;;
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- * psr.ic: on
- * r2: points to &pt_regs.r16
- * r3: points to &pt_regs.r17
- * r8: contents of ar.ccv
- * r9: contents of ar.csd
- * r10: contents of ar.ssd
- * r11: FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define SAVE_REST \
-.mem.offset 0,0; st8.spill [r2]=r16,16; \
-.mem.offset 8,0; st8.spill [r3]=r17,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r18,16; \
-.mem.offset 8,0; st8.spill [r3]=r19,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r20,16; \
-.mem.offset 8,0; st8.spill [r3]=r21,16; \
- mov r18=b6; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r22,16; \
-.mem.offset 8,0; st8.spill [r3]=r23,16; \
- mov r19=b7; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r24,16; \
-.mem.offset 8,0; st8.spill [r3]=r25,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r26,16; \
-.mem.offset 8,0; st8.spill [r3]=r27,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r28,16; \
-.mem.offset 8,0; st8.spill [r3]=r29,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r30,16; \
-.mem.offset 8,0; st8.spill [r3]=r31,32; \
- ;; \
- mov ar.fpsr=r11; /* M-unit */ \
- st8 [r2]=r8,8; /* ar.ccv */ \
- adds r24=PT(B6)-PT(F7),r3; \
- ;; \
- stf.spill [r2]=f6,32; \
- stf.spill [r3]=f7,32; \
- ;; \
- stf.spill [r2]=f8,32; \
- stf.spill [r3]=f9,32; \
- ;; \
- stf.spill [r2]=f10; \
- stf.spill [r3]=f11; \
- adds r25=PT(B7)-PT(F11),r3; \
- ;; \
- st8 [r24]=r18,16; /* b6 */ \
- st8 [r25]=r19,16; /* b7 */ \
- ;; \
- st8 [r24]=r9; /* ar.csd */ \
- st8 [r25]=r10; /* ar.ssd */ \
- ;;
-
-#define RSE_WORKAROUND \
-(pUStk) extr.u r17=r18,3,6; \
-(pUStk) sub r16=r18,r22; \
-[1:](pKStk) br.cond.sptk.many 1f; \
- .xdata4 ".data..patch.rse",1b-. \
- ;; \
- cmp.ge p6,p7 = 33,r17; \
- ;; \
-(p6) mov r17=0x310; \
-(p7) mov r17=0x308; \
- ;; \
- cmp.leu p1,p0=r16,r17; \
-(p1) br.cond.sptk.many 1f; \
- dep.z r17=r26,0,62; \
- movl r16=2f; \
- ;; \
- mov ar.pfs=r17; \
- dep r27=r0,r27,16,14; \
- mov b0=r16; \
- ;; \
- br.ret.sptk b0; \
- ;; \
-2: \
- mov ar.rsc=r0 \
- ;; \
- flushrs; \
- ;; \
- mov ar.bspstore=r22 \
- ;; \
- mov r18=ar.bsp; \
- ;; \
-1: \
- .pred.rel "mutex", pKStk, pUStk
-
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(COVER, mov r30=cr.ifs, , RSE_WORKAROUND)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(COVER, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
-#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
deleted file mode 100644
index 3661135da9..0000000000
--- a/arch/ia64/kernel/module.c
+++ /dev/null
@@ -1,959 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IA-64-specific support for kernel module loader.
- *
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Loosely based on patch by Rusty Russell.
- */
-
-/* relocs tested so far:
-
- DIR64LSB
- FPTR64LSB
- GPREL22
- LDXMOV
- LDXMOV
- LTOFF22
- LTOFF22X
- LTOFF22X
- LTOFF_FPTR22
- PCREL21B (for br.call only; br.cond is not supported out of modules!)
- PCREL60B (for brl.cond only; brl.call is not supported for modules!)
- PCREL64LSB
- SECREL32LSB
- SEGREL64LSB
- */
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/elf.h>
-#include <linux/moduleloader.h>
-#include <linux/string.h>
-#include <linux/vmalloc.h>
-
-#include <asm/patch.h>
-#include <asm/unaligned.h>
-#include <asm/sections.h>
-
-#define ARCH_MODULE_DEBUG 0
-
-#if ARCH_MODULE_DEBUG
-# define DEBUGP printk
-# define inline
-#else
-# define DEBUGP(fmt , a...)
-#endif
-
-#ifdef CONFIG_ITANIUM
-# define USE_BRL 0
-#else
-# define USE_BRL 1
-#endif
-
-#define MAX_LTOFF ((uint64_t) (1 << 22)) /* max. allowable linkage-table offset */
-
-/* Define some relocation helper macros/types: */
-
-#define FORMAT_SHIFT 0
-#define FORMAT_BITS 3
-#define FORMAT_MASK ((1 << FORMAT_BITS) - 1)
-#define VALUE_SHIFT 3
-#define VALUE_BITS 5
-#define VALUE_MASK ((1 << VALUE_BITS) - 1)
-
-enum reloc_target_format {
- /* direct encoded formats: */
- RF_NONE = 0,
- RF_INSN14 = 1,
- RF_INSN22 = 2,
- RF_INSN64 = 3,
- RF_32MSB = 4,
- RF_32LSB = 5,
- RF_64MSB = 6,
- RF_64LSB = 7,
-
- /* formats that cannot be directly decoded: */
- RF_INSN60,
- RF_INSN21B, /* imm21 form 1 */
- RF_INSN21M, /* imm21 form 2 */
- RF_INSN21F /* imm21 form 3 */
-};
-
-enum reloc_value_formula {
- RV_DIRECT = 4, /* S + A */
- RV_GPREL = 5, /* @gprel(S + A) */
- RV_LTREL = 6, /* @ltoff(S + A) */
- RV_PLTREL = 7, /* @pltoff(S + A) */
- RV_FPTR = 8, /* @fptr(S + A) */
- RV_PCREL = 9, /* S + A - P */
- RV_LTREL_FPTR = 10, /* @ltoff(@fptr(S + A)) */
- RV_SEGREL = 11, /* @segrel(S + A) */
- RV_SECREL = 12, /* @secrel(S + A) */
- RV_BDREL = 13, /* BD + A */
- RV_LTV = 14, /* S + A (like RV_DIRECT, except frozen at static link-time) */
- RV_PCREL2 = 15, /* S + A - P */
- RV_SPECIAL = 16, /* various (see below) */
- RV_RSVD17 = 17,
- RV_TPREL = 18, /* @tprel(S + A) */
- RV_LTREL_TPREL = 19, /* @ltoff(@tprel(S + A)) */
- RV_DTPMOD = 20, /* @dtpmod(S + A) */
- RV_LTREL_DTPMOD = 21, /* @ltoff(@dtpmod(S + A)) */
- RV_DTPREL = 22, /* @dtprel(S + A) */
- RV_LTREL_DTPREL = 23, /* @ltoff(@dtprel(S + A)) */
- RV_RSVD24 = 24,
- RV_RSVD25 = 25,
- RV_RSVD26 = 26,
- RV_RSVD27 = 27
- /* 28-31 reserved for implementation-specific purposes. */
-};
-
-#define N(reloc) [R_IA64_##reloc] = #reloc
-
-static const char *reloc_name[256] = {
- N(NONE), N(IMM14), N(IMM22), N(IMM64),
- N(DIR32MSB), N(DIR32LSB), N(DIR64MSB), N(DIR64LSB),
- N(GPREL22), N(GPREL64I), N(GPREL32MSB), N(GPREL32LSB),
- N(GPREL64MSB), N(GPREL64LSB), N(LTOFF22), N(LTOFF64I),
- N(PLTOFF22), N(PLTOFF64I), N(PLTOFF64MSB), N(PLTOFF64LSB),
- N(FPTR64I), N(FPTR32MSB), N(FPTR32LSB), N(FPTR64MSB),
- N(FPTR64LSB), N(PCREL60B), N(PCREL21B), N(PCREL21M),
- N(PCREL21F), N(PCREL32MSB), N(PCREL32LSB), N(PCREL64MSB),
- N(PCREL64LSB), N(LTOFF_FPTR22), N(LTOFF_FPTR64I), N(LTOFF_FPTR32MSB),
- N(LTOFF_FPTR32LSB), N(LTOFF_FPTR64MSB), N(LTOFF_FPTR64LSB), N(SEGREL32MSB),
- N(SEGREL32LSB), N(SEGREL64MSB), N(SEGREL64LSB), N(SECREL32MSB),
- N(SECREL32LSB), N(SECREL64MSB), N(SECREL64LSB), N(REL32MSB),
- N(REL32LSB), N(REL64MSB), N(REL64LSB), N(LTV32MSB),
- N(LTV32LSB), N(LTV64MSB), N(LTV64LSB), N(PCREL21BI),
- N(PCREL22), N(PCREL64I), N(IPLTMSB), N(IPLTLSB),
- N(COPY), N(LTOFF22X), N(LDXMOV), N(TPREL14),
- N(TPREL22), N(TPREL64I), N(TPREL64MSB), N(TPREL64LSB),
- N(LTOFF_TPREL22), N(DTPMOD64MSB), N(DTPMOD64LSB), N(LTOFF_DTPMOD22),
- N(DTPREL14), N(DTPREL22), N(DTPREL64I), N(DTPREL32MSB),
- N(DTPREL32LSB), N(DTPREL64MSB), N(DTPREL64LSB), N(LTOFF_DTPREL22)
-};
-
-#undef N
-
-/* Opaque struct for insns, to protect against derefs. */
-struct insn;
-
-static inline uint64_t
-bundle (const struct insn *insn)
-{
- return (uint64_t) insn & ~0xfUL;
-}
-
-static inline int
-slot (const struct insn *insn)
-{
- return (uint64_t) insn & 0x3;
-}
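(A hedged example of the pointer encoding the two helpers above rely on;
the address value is illustrative only.)

static void insn_encoding_example(void)
{
	/* A "struct insn *" encodes bundle address | slot number. */
	const struct insn *insn = (const struct insn *) 0x200000000001a012UL;

	BUG_ON(bundle(insn) != 0x200000000001a010UL);	/* 16-byte bundle base */
	BUG_ON(slot(insn) != 2);			/* third slot (index 2) */
}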
-
-static int
-apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
-{
- if (slot(insn) != 1 && slot(insn) != 2) {
- printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
- mod->name, slot(insn));
- return 0;
- }
- ia64_patch_imm64((u64) insn, val);
- return 1;
-}
-
-static int
-apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
-{
- if (slot(insn) != 1 && slot(insn) != 2) {
- printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
- mod->name, slot(insn));
- return 0;
- }
- if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
- printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
- mod->name, (long) val);
- return 0;
- }
- ia64_patch_imm60((u64) insn, val);
- return 1;
-}
-
-static int
-apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
-{
- if (val + (1 << 21) >= (1 << 22)) {
- printk(KERN_ERR "%s: value %li out of IMM22 range\n",
- mod->name, (long)val);
- return 0;
- }
- ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
- | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */
- | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
- | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
- return 1;
-}
-
-static int
-apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
-{
- if (val + (1 << 20) >= (1 << 21)) {
- printk(KERN_ERR "%s: value %li out of IMM21b range\n",
- mod->name, (long)val);
- return 0;
- }
- ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
- | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
- return 1;
-}
-
-#if USE_BRL
-
-struct plt_entry {
-	/* Two instruction bundles in PLT. */
- unsigned char bundle[2][16];
-};
-
-static const struct plt_entry ia64_plt_template = {
- {
- {
- 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
- 0x00, 0x00, 0x00, 0x60
- },
- {
- 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     brl.many TARGET_IP */
- 0x08, 0x00, 0x00, 0xc0
- }
- }
-};
-
-static int
-patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
-{
- if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
- && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
- (target_ip - (int64_t) plt->bundle[1]) / 16))
- return 1;
- return 0;
-}
-
-unsigned long
-plt_target (struct plt_entry *plt)
-{
- uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
- long off;
-
- b0 = b[0]; b1 = b[1];
- off = ( ((b1 & 0x00fffff000000000UL) >> 36) /* imm20b -> bit 0 */
- | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */
- | ((b1 & 0x0800000000000000UL) << 0)); /* i -> bit 59 */
- return (long) plt->bundle[1] + 16*off;
-}
-
-#else /* !USE_BRL */
-
-struct plt_entry {
- /* Three instruction bundles in PLT. */
- unsigned char bundle[3][16];
-};
-
-static const struct plt_entry ia64_plt_template = {
- {
- {
- 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* movl r16=TARGET_IP */
- 0x02, 0x00, 0x00, 0x60
- },
- {
- 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
- 0x00, 0x00, 0x00, 0x60
- },
- {
- 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
- 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */
- 0x60, 0x00, 0x80, 0x00 /* br.few b6 */
- }
- }
-};
-
-static int
-patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
-{
- if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
- && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
- return 1;
- return 0;
-}
-
-unsigned long
-plt_target (struct plt_entry *plt)
-{
- uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];
-
- b0 = b[0]; b1 = b[1];
- return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */
- | ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */
- | ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */
- | ((b1 & 0x0000100000000000) >> 23) /* ic -> bit 21 */
- | ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40) /* imm41 -> bit 22 */
- | ((b1 & 0x0800000000000000) << 4)); /* i -> bit 63 */
-}
-
-#endif /* !USE_BRL */
-
-void
-module_arch_freeing_init (struct module *mod)
-{
- if (mod->arch.init_unw_table) {
- unw_remove_unwind_table(mod->arch.init_unw_table);
- mod->arch.init_unw_table = NULL;
- }
-}
-
-/* Have we already seen one of these relocations? */
-/* FIXME: we could look in other sections, too --RR */
-static int
-duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
-{
- unsigned int i;
-
- for (i = 0; i < num; i++) {
- if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
- return 1;
- }
- return 0;
-}
-
-/* Count how many GOT entries we may need */
-static unsigned int
-count_gots (const Elf64_Rela *rela, unsigned int num)
-{
- unsigned int i, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_IA64_LTOFF22:
- case R_IA64_LTOFF22X:
- case R_IA64_LTOFF64I:
- case R_IA64_LTOFF_FPTR22:
- case R_IA64_LTOFF_FPTR64I:
- case R_IA64_LTOFF_FPTR32MSB:
- case R_IA64_LTOFF_FPTR32LSB:
- case R_IA64_LTOFF_FPTR64MSB:
- case R_IA64_LTOFF_FPTR64LSB:
- if (!duplicate_reloc(rela, i))
- ret++;
- break;
- }
- }
- return ret;
-}
-
-/* Count how many PLT entries we may need */
-static unsigned int
-count_plts (const Elf64_Rela *rela, unsigned int num)
-{
- unsigned int i, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_IA64_PCREL21B:
- case R_IA64_PLTOFF22:
- case R_IA64_PLTOFF64I:
- case R_IA64_PLTOFF64MSB:
- case R_IA64_PLTOFF64LSB:
- case R_IA64_IPLTMSB:
- case R_IA64_IPLTLSB:
- if (!duplicate_reloc(rela, i))
- ret++;
- break;
- }
- }
- return ret;
-}
-
-/* We need to create a function descriptor for any internal function
-   that is referenced. */
-static unsigned int
-count_fdescs (const Elf64_Rela *rela, unsigned int num)
-{
- unsigned int i, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not time critical. */
- for (i = 0; i < num; i++) {
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_IA64_FPTR64I:
- case R_IA64_FPTR32LSB:
- case R_IA64_FPTR32MSB:
- case R_IA64_FPTR64LSB:
- case R_IA64_FPTR64MSB:
- case R_IA64_LTOFF_FPTR22:
- case R_IA64_LTOFF_FPTR32LSB:
- case R_IA64_LTOFF_FPTR32MSB:
- case R_IA64_LTOFF_FPTR64I:
- case R_IA64_LTOFF_FPTR64LSB:
- case R_IA64_LTOFF_FPTR64MSB:
- case R_IA64_IPLTMSB:
- case R_IA64_IPLTLSB:
- /*
- * Jumps to static functions sometimes go straight to their
- * offset. Of course, that may not be possible if the jump is
-			 * from init -> core or vice versa, so we need to generate an
- * FDESC (and PLT etc) for that.
- */
- case R_IA64_PCREL21B:
- if (!duplicate_reloc(rela, i))
- ret++;
- break;
- }
- }
- return ret;
-}
-
-int
-module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
- struct module *mod)
-{
- unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
- Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
-
- /*
- * To store the PLTs and function-descriptors, we expand the .text section for
- * core module-code and the .init.text section for initialization code.
- */
- for (s = sechdrs; s < sechdrs_end; ++s)
- if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
- mod->arch.core_plt = s;
- else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
- mod->arch.init_plt = s;
- else if (strcmp(".got", secstrings + s->sh_name) == 0)
- mod->arch.got = s;
- else if (strcmp(".opd", secstrings + s->sh_name) == 0)
- mod->arch.opd = s;
- else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
- mod->arch.unwind = s;
-
- if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
- printk(KERN_ERR "%s: sections missing\n", mod->name);
- return -ENOEXEC;
- }
-
- /* GOT and PLTs can occur in any relocated section... */
- for (s = sechdrs + 1; s < sechdrs_end; ++s) {
- const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
- unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);
-
- if (s->sh_type != SHT_RELA)
- continue;
-
- gots += count_gots(rels, numrels);
- fdescs += count_fdescs(rels, numrels);
- if (strstr(secstrings + s->sh_name, ".init"))
- init_plts += count_plts(rels, numrels);
- else
- core_plts += count_plts(rels, numrels);
- }
-
- mod->arch.core_plt->sh_type = SHT_NOBITS;
- mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core_plt->sh_addralign = 16;
- mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
- mod->arch.init_plt->sh_type = SHT_NOBITS;
- mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init_plt->sh_addralign = 16;
- mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
- mod->arch.got->sh_type = SHT_NOBITS;
- mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
- mod->arch.got->sh_addralign = 8;
- mod->arch.got->sh_size = gots * sizeof(struct got_entry);
- mod->arch.opd->sh_type = SHT_NOBITS;
- mod->arch.opd->sh_flags = SHF_ALLOC;
- mod->arch.opd->sh_addralign = 8;
- mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
- DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
- __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
- mod->arch.got->sh_size, mod->arch.opd->sh_size);
- return 0;
-}
-
-static inline bool
-in_init (const struct module *mod, uint64_t addr)
-{
- return within_module_init(addr, mod);
-}
-
-static inline bool
-in_core (const struct module *mod, uint64_t addr)
-{
- return within_module_core(addr, mod);
-}
-
-static inline bool
-is_internal (const struct module *mod, uint64_t value)
-{
- return in_init(mod, value) || in_core(mod, value);
-}
-
-/*
- * Get gp-relative offset for the linkage-table entry of VALUE.
- */
-static uint64_t
-get_ltoff (struct module *mod, uint64_t value, int *okp)
-{
- struct got_entry *got, *e;
-
- if (!*okp)
- return 0;
-
- got = (void *) mod->arch.got->sh_addr;
- for (e = got; e < got + mod->arch.next_got_entry; ++e)
- if (e->val == value)
- goto found;
-
- /* Not enough GOT entries? */
- BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
-
- e->val = value;
- ++mod->arch.next_got_entry;
- found:
- return (uint64_t) e - mod->arch.gp;
-}
-
-static inline int
-gp_addressable (struct module *mod, uint64_t value)
-{
- return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
-}
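-
-/*
- * The test above is the usual unsigned-wrap idiom for the two-sided check
- * gp - MAX_LTOFF/2 <= value < gp + MAX_LTOFF/2: values below the window
- * wrap around to huge unsigned numbers and fail the compare. A minimal
- * stand-alone sketch of the idiom ("in_window" is an illustrative name):
- */
-static inline int
-in_window (uint64_t value, uint64_t center, uint64_t size)
-{
- /* true iff center - size/2 <= value < center + size/2 */
- return value - center + size/2 < size;
-}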
-
-/* Get PC-relative PLT entry for this value. Returns 0 on failure. */
-static uint64_t
-get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
-{
- struct plt_entry *plt, *plt_end;
- uint64_t target_ip, target_gp;
-
- if (!*okp)
- return 0;
-
- if (in_init(mod, (uint64_t) insn)) {
- plt = (void *) mod->arch.init_plt->sh_addr;
- plt_end = (void *) plt + mod->arch.init_plt->sh_size;
- } else {
- plt = (void *) mod->arch.core_plt->sh_addr;
- plt_end = (void *) plt + mod->arch.core_plt->sh_size;
- }
-
- /* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
- target_ip = ((uint64_t *) value)[0];
- target_gp = ((uint64_t *) value)[1];
-
- /* Look for existing PLT entry. */
- while (plt->bundle[0][0]) {
- if (plt_target(plt) == target_ip)
- goto found;
- if (++plt >= plt_end)
- BUG();
- }
- *plt = ia64_plt_template;
- if (!patch_plt(mod, plt, target_ip, target_gp)) {
- *okp = 0;
- return 0;
- }
-#if ARCH_MODULE_DEBUG
- if (plt_target(plt) != target_ip) {
- printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
- __func__, target_ip, plt_target(plt));
- *okp = 0;
- return 0;
- }
-#endif
- found:
- return (uint64_t) plt;
-}
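-
-/*
- * Note: the scan above relies on unused PLT entries reading as zero. Both
- * PLT sections are declared SHT_NOBITS in module_frob_arch_sections(), so
- * the module loader hands us zero-initialized memory and an all-zero first
- * bundle word marks the first free slot.
- */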
-
-/* Get function descriptor for VALUE. */
-static uint64_t
-get_fdesc (struct module *mod, uint64_t value, int *okp)
-{
- struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;
-
- if (!*okp)
- return 0;
-
- if (!value) {
- printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
- return 0;
- }
-
- if (!is_internal(mod, value))
- /*
- * If it's not a module-local entry-point, "value" already points to a
- * function-descriptor.
- */
- return value;
-
- /* Look for existing function descriptor. */
- while (fdesc->addr) {
- if (fdesc->addr == value)
- return (uint64_t)fdesc;
- if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
- BUG();
- }
-
- /* Create new one */
- fdesc->addr = value;
- fdesc->gp = mod->arch.gp;
- return (uint64_t) fdesc;
-}
-
-static inline int
-do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
- Elf64_Shdr *sec, void *location)
-{
- enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
- enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
- uint64_t val;
- int ok = 1;
-
- val = sym->st_value + addend;
-
- switch (formula) {
- case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */
- case RV_DIRECT:
- break;
-
- case RV_GPREL: val -= mod->arch.gp; break;
- case RV_LTREL: val = get_ltoff(mod, val, &ok); break;
- case RV_PLTREL: val = get_plt(mod, location, val, &ok); break;
- case RV_FPTR: val = get_fdesc(mod, val, &ok); break;
- case RV_SECREL: val -= sec->sh_addr; break;
- case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;
-
- case RV_PCREL:
- switch (r_type) {
- case R_IA64_PCREL21B:
- if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
- (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
- /*
- * Init section may have been allocated far away from core,
- * if the branch won't reach, then allocate a plt for it.
- */
- uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
- if (delta + (1 << 20) >= (1 << 21)) {
- val = get_fdesc(mod, val, &ok);
- val = get_plt(mod, location, val, &ok);
- }
- } else if (!is_internal(mod, val))
- val = get_plt(mod, location, val, &ok);
- fallthrough;
- default:
- val -= bundle(location);
- break;
-
- case R_IA64_PCREL32MSB:
- case R_IA64_PCREL32LSB:
- case R_IA64_PCREL64MSB:
- case R_IA64_PCREL64LSB:
- val -= (uint64_t) location;
- break;
-
- }
- switch (r_type) {
- case R_IA64_PCREL60B: format = RF_INSN60; break;
- case R_IA64_PCREL21B: format = RF_INSN21B; break;
- case R_IA64_PCREL21M: format = RF_INSN21M; break;
- case R_IA64_PCREL21F: format = RF_INSN21F; break;
- default: break;
- }
- break;
-
- case RV_BDREL:
- val -= (uint64_t) (in_init(mod, val) ? mod->mem[MOD_INIT_TEXT].base :
- mod->mem[MOD_TEXT].base);
- break;
-
- case RV_LTV:
- /* can link-time value relocs happen here? */
- BUG();
- break;
-
- case RV_PCREL2:
- if (r_type == R_IA64_PCREL21BI) {
- if (!is_internal(mod, val)) {
- printk(KERN_ERR "%s: %s reloc against "
- "non-local symbol (%lx)\n", __func__,
- reloc_name[r_type], (unsigned long)val);
- return -ENOEXEC;
- }
- format = RF_INSN21B;
- }
- val -= bundle(location);
- break;
-
- case RV_SPECIAL:
- switch (r_type) {
- case R_IA64_IPLTMSB:
- case R_IA64_IPLTLSB:
- val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
- format = RF_64LSB;
- if (r_type == R_IA64_IPLTMSB)
- format = RF_64MSB;
- break;
-
- case R_IA64_SUB:
- val = addend - sym->st_value;
- format = RF_INSN64;
- break;
-
- case R_IA64_LTOFF22X:
- if (gp_addressable(mod, val))
- val -= mod->arch.gp;
- else
- val = get_ltoff(mod, val, &ok);
- format = RF_INSN22;
- break;
-
- case R_IA64_LDXMOV:
- if (gp_addressable(mod, val)) {
- /* turn "ld8" into "mov": */
- DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
- ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
- }
- return 0;
-
- default:
- if (reloc_name[r_type])
- printk(KERN_ERR "%s: special reloc %s not supported",
- mod->name, reloc_name[r_type]);
- else
- printk(KERN_ERR "%s: unknown special reloc %x\n",
- mod->name, r_type);
- return -ENOEXEC;
- }
- break;
-
- case RV_TPREL:
- case RV_LTREL_TPREL:
- case RV_DTPMOD:
- case RV_LTREL_DTPMOD:
- case RV_DTPREL:
- case RV_LTREL_DTPREL:
- printk(KERN_ERR "%s: %s reloc not supported\n",
- mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
- return -ENOEXEC;
-
- default:
- printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
- return -ENOEXEC;
- }
-
- if (!ok)
- return -ENOEXEC;
-
- DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
- reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
-
- switch (format) {
- case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
- case RF_INSN22: ok = apply_imm22(mod, location, val); break;
- case RF_INSN64: ok = apply_imm64(mod, location, val); break;
- case RF_INSN60: ok = apply_imm60(mod, location, (int64_t) val / 16); break;
- case RF_32LSB: put_unaligned(val, (uint32_t *) location); break;
- case RF_64LSB: put_unaligned(val, (uint64_t *) location); break;
- case RF_32MSB: /* ia64 Linux is little-endian... */
- case RF_64MSB: /* ia64 Linux is little-endian... */
- case RF_INSN14: /* must be within-module, i.e., resolved by "ld -r" */
- case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */
- case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */
- printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
- mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
- return -ENOEXEC;
-
- default:
- printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
- mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
- return -ENOEXEC;
- }
- return ok ? 0 : -ENOEXEC;
-}
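-
-/*
- * The R_IA64_PCREL21B reach test above, spelled out: the 21-bit branch
- * displacement counts 16-byte bundles and must lie in [-2^20, 2^20).
- * A stand-alone sketch of the same check ("branch_out_of_reach" is an
- * illustrative name):
- */
-static inline int
-branch_out_of_reach (uint64_t target, uint64_t source)
-{
- uint64_t delta = ((int64_t) target - (int64_t) source) / 16;
-
- /* shift the signed range up by 2^20 so one unsigned compare suffices */
- return delta + (1 << 20) >= (1 << 21);
-}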
-
-int
-apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *mod)
-{
- unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
- Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
- Elf64_Shdr *target_sec;
- int ret;
-
- DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
- relsec, n, sechdrs[relsec].sh_info);
-
- target_sec = sechdrs + sechdrs[relsec].sh_info;
-
- if (target_sec->sh_entsize == ~0UL)
- /*
- * If target section wasn't allocated, we don't need to relocate it.
- * Happens, e.g., for debug sections.
- */
- return 0;
-
- if (!mod->arch.gp) {
- /*
- * XXX Should have an arch-hook for running this after final section
- * addresses have been selected...
- */
- uint64_t gp;
- struct module_memory *mod_mem;
-
- mod_mem = &mod->mem[MOD_DATA];
- if (mod_mem->size > MAX_LTOFF)
- /*
- * This takes advantage of the fact that ARCH_SHF_SMALL sections get allocated
- * at the end of the module.
- */
- gp = mod_mem->size - MAX_LTOFF / 2;
- else
- gp = mod_mem->size / 2;
- gp = (uint64_t) mod_mem->base + ((gp + 7) & -8);
- mod->arch.gp = gp;
- DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
- }
-
- for (i = 0; i < n; i++) {
- ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
- ((Elf64_Sym *) sechdrs[symindex].sh_addr
- + ELF64_R_SYM(rela[i].r_info)),
- rela[i].r_addend, target_sec,
- (void *) target_sec->sh_addr + rela[i].r_offset);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
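-
-/*
- * The gp placement above as a stand-alone sketch: aim the +/- MAX_LTOFF/2
- * addressable window at the end of the data region, where the small-data
- * sections land, then round up to an 8-byte boundary ("pick_gp" is an
- * illustrative name):
- */
-static inline uint64_t
-pick_gp (uint64_t base, uint64_t size, uint64_t max_ltoff)
-{
- uint64_t gp = (size > max_ltoff) ? size - max_ltoff/2 : size/2;
-
- return base + ((gp + 7) & -8); /* round up to an 8-byte boundary */
-}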
-
-/*
- * Modules contain a single unwind table which covers both the core and the init text
- * sections but since the two are not contiguous, we need to split this table up such that
- * we can register (and unregister) each "segment" separately. Fortunately, this sounds
- * more complicated than it really is.
- */
-static void
-register_unwind_table (struct module *mod)
-{
- struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
- struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
- struct unw_table_entry *e1, *e2, *core, *init;
- unsigned long num_init = 0, num_core = 0;
-
- /* First, count how many init and core unwind-table entries there are. */
- for (e1 = start; e1 < end; ++e1)
- if (in_init(mod, e1->start_offset))
- ++num_init;
- else
- ++num_core;
- /*
- * Second, sort the table such that all unwind-table entries for the init and core
- * text sections are nicely separated. We do this with a stupid bubble sort
- * (unwind tables don't get ridiculously huge).
- */
- for (e1 = start; e1 < end; ++e1) {
- for (e2 = e1 + 1; e2 < end; ++e2) {
- if (e2->start_offset < e1->start_offset) {
- swap(*e1, *e2);
- }
- }
- }
- /*
- * Third, locate the init and core segments in the unwind table:
- */
- if (in_init(mod, start->start_offset)) {
- init = start;
- core = start + num_init;
- } else {
- core = start;
- init = start + num_core;
- }
-
- DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
- mod->name, mod->arch.gp, num_init, num_core);
-
- /*
- * Fourth, register both tables (if not empty).
- */
- if (num_core > 0) {
- mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
- core, core + num_core);
- DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
- mod->arch.core_unw_table, core, core + num_core);
- }
- if (num_init > 0) {
- mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
- init, init + num_init);
- DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
- mod->arch.init_unw_table, init, init + num_init);
- }
-}
-
-int
-module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
-{
- struct mod_arch_specific *mas = &mod->arch;
-
- DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
- if (mas->unwind)
- register_unwind_table(mod);
-
- /*
- * ".opd" was already relocated to the final destination. Store
- * its address for use in the symbolizer.
- */
- mas->opd_addr = (void *)mas->opd->sh_addr;
- mas->opd_size = mas->opd->sh_size;
-
- /*
- * Module relocation was already done at this point. Section
- * headers are about to be deleted. Wipe out load-time context.
- */
- mas->core_plt = NULL;
- mas->init_plt = NULL;
- mas->got = NULL;
- mas->opd = NULL;
- mas->unwind = NULL;
- mas->gp = 0;
- mas->next_got_entry = 0;
-
- return 0;
-}
-
-void
-module_arch_cleanup (struct module *mod)
-{
- if (mod->arch.init_unw_table) {
- unw_remove_unwind_table(mod->arch.init_unw_table);
- mod->arch.init_unw_table = NULL;
- }
- if (mod->arch.core_unw_table) {
- unw_remove_unwind_table(mod->arch.core_unw_table);
- mod->arch.core_unw_table = NULL;
- }
-}
-
-void *dereference_module_function_descriptor(struct module *mod, void *ptr)
-{
- struct mod_arch_specific *mas = &mod->arch;
-
- if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
- return ptr;
-
- return dereference_function_descriptor(ptr);
-}
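-
-/*
- * For reference: an ia64 function pointer into .opd names a descriptor
- * pair {entry address, gp}, not code. The helper above forwards such
- * pointers to the generic dereference, which boils down to reading the
- * first word ("fdesc_entry" is an illustrative name):
- */
-static inline void *
-fdesc_entry (const struct fdesc *fd)
-{
- return (void *) fd->addr; /* fd->gp holds the matching gp */
-}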
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
deleted file mode 100644
index 025e5133c8..0000000000
--- a/arch/ia64/kernel/msi_ia64.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * MSI hooks for standard x86 apic
- */
-
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <linux/msi.h>
-#include <linux/dmar.h>
-#include <asm/smp.h>
-#include <asm/msidef.h>
-
-static struct irq_chip ia64_msi_chip;
-
-#ifdef CONFIG_SMP
-static int ia64_set_msi_irq_affinity(struct irq_data *idata,
- const cpumask_t *cpu_mask, bool force)
-{
- struct msi_msg msg;
- u32 addr, data;
- int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
- unsigned int irq = idata->irq;
-
- if (irq_prepare_move(irq, cpu))
- return -1;
-
- __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);
-
- addr = msg.address_lo;
- addr &= MSI_ADDR_DEST_ID_MASK;
- addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
- msg.address_lo = addr;
-
- data = msg.data;
- data &= MSI_DATA_VECTOR_MASK;
- data |= MSI_DATA_VECTOR(irq_to_vector(irq));
- msg.data = data;
-
- pci_write_msi_msg(irq, &msg);
- irq_data_update_affinity(idata, cpumask_of(cpu));
-
- return 0;
-}
-#endif /* CONFIG_SMP */
-
-int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
-{
- struct msi_msg msg;
- unsigned long dest_phys_id;
- int irq, vector;
-
- irq = create_irq();
- if (irq < 0)
- return irq;
-
- irq_set_msi_desc(irq, desc);
- dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
- cpu_online_mask));
- vector = irq_to_vector(irq);
-
- msg.address_hi = 0;
- msg.address_lo =
- MSI_ADDR_HEADER |
- MSI_ADDR_DEST_MODE_PHYS |
- MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DEST_ID_CPU(dest_phys_id);
-
- msg.data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- MSI_DATA_DELIVERY_FIXED |
- MSI_DATA_VECTOR(vector);
-
- pci_write_msi_msg(irq, &msg);
- irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
-
- return 0;
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
- destroy_irq(irq);
-}
-
-static void ia64_ack_msi_irq(struct irq_data *data)
-{
- irq_complete_move(data->irq);
- irq_move_irq(data);
- ia64_eoi();
-}
-
-static int ia64_msi_retrigger_irq(struct irq_data *data)
-{
- unsigned int vector = irq_to_vector(data->irq);
- ia64_resend_irq(vector);
-
- return 1;
-}
-
-/*
- * Generic ops used on most IA64 platforms.
- */
-static struct irq_chip ia64_msi_chip = {
- .name = "PCI-MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
- .irq_ack = ia64_ack_msi_irq,
-#ifdef CONFIG_SMP
- .irq_set_affinity = ia64_set_msi_irq_affinity,
-#endif
- .irq_retrigger = ia64_msi_retrigger_irq,
-};
-
-#ifdef CONFIG_INTEL_IOMMU
-#ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- unsigned int irq = data->irq;
- struct irq_cfg *cfg = irq_cfg + irq;
- struct msi_msg msg;
- int cpu = cpumask_first_and(mask, cpu_online_mask);
-
- if (irq_prepare_move(irq, cpu))
- return -1;
-
- dmar_msi_read(irq, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
-
- dmar_msi_write(irq, &msg);
- irq_data_update_affinity(data, mask);
-
- return 0;
-}
-#endif /* CONFIG_SMP */
-
-static struct irq_chip dmar_msi_type = {
- .name = "DMAR_MSI",
- .irq_unmask = dmar_msi_unmask,
- .irq_mask = dmar_msi_mask,
- .irq_ack = ia64_ack_msi_irq,
-#ifdef CONFIG_SMP
- .irq_set_affinity = dmar_msi_set_affinity,
-#endif
- .irq_retrigger = ia64_msi_retrigger_irq,
-};
-
-static void
-msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
-{
- struct irq_cfg *cfg = irq_cfg + irq;
- unsigned dest;
-
- dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
- cpu_online_mask));
-
- msg->address_hi = 0;
- msg->address_lo =
- MSI_ADDR_HEADER |
- MSI_ADDR_DEST_MODE_PHYS |
- MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DEST_ID_CPU(dest);
-
- msg->data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- MSI_DATA_DELIVERY_FIXED |
- MSI_DATA_VECTOR(cfg->vector);
-}
-
-int dmar_alloc_hwirq(int id, int node, void *arg)
-{
- int irq;
- struct msi_msg msg;
-
- irq = create_irq();
- if (irq > 0) {
- irq_set_handler_data(irq, arg);
- irq_set_chip_and_handler_name(irq, &dmar_msi_type,
- handle_edge_irq, "edge");
- msi_compose_msg(NULL, irq, &msg);
- dmar_msi_write(irq, &msg);
- }
-
- return irq;
-}
-
-void dmar_free_hwirq(int irq)
-{
- irq_set_handler_data(irq, NULL);
- destroy_irq(irq);
-}
-#endif /* CONFIG_INTEL_IOMMU */
-
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
deleted file mode 100644
index 8a959f2066..0000000000
--- a/arch/ia64/kernel/numa.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *
- * ia64 kernel NUMA specific stuff
- *
- * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Jesse Barnes <jbarnes@sgi.com>
- */
-#include <linux/topology.h>
-#include <linux/module.h>
-#include <asm/processor.h>
-#include <asm/smp.h>
-
-u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-EXPORT_SYMBOL(node_to_cpu_mask);
-
-void map_cpu_to_node(int cpu, int nid)
-{
- int oldnid;
- if (nid < 0) { /* just initialize by zero */
- cpu_to_node_map[cpu] = 0;
- return;
- }
- /* sanity check first */
- oldnid = cpu_to_node_map[cpu];
- if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
- return; /* nothing to do */
- }
- /* we don't have cpu-driven node hot-add yet...
-    In the usual case, a node is created from SRAT at boot time. */
- if (!node_online(nid))
- nid = first_online_node;
- cpu_to_node_map[cpu] = nid;
- cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
- return;
-}
-
-void unmap_cpu_from_node(int cpu, int nid)
-{
- WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
- WARN_ON(cpu_to_node_map[cpu] != nid);
- cpu_to_node_map[cpu] = 0;
- cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
-}
-
-
-/**
- * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
- *
- * Build cpu to node mapping and initialize the per node cpu masks using
- * info from the node_cpuid array handed to us by ACPI.
- */
-void __init build_cpu_to_node_map(void)
-{
- int cpu, i, node;
-
- for(node=0; node < MAX_NUMNODES; node++)
- cpumask_clear(&node_to_cpu_mask[node]);
-
- for_each_possible_early_cpu(cpu) {
- node = NUMA_NO_NODE;
- for (i = 0; i < NR_CPUS; ++i)
- if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
- node = node_cpuid[i].nid;
- break;
- }
- map_cpu_to_node(cpu, node);
- }
-}
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
deleted file mode 100644
index fb6db6966f..0000000000
--- a/arch/ia64/kernel/pal.S
+++ /dev/null
@@ -1,306 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * PAL Firmware support
- * IA-64 Processor Programmers Reference Vol 2
- *
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
- * David Mosberger <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * 05/22/2000 eranian Added support for stacked register calls
- * 05/24/2000 eranian Added support for physical mode static calls
- */
-
-#include <linux/export.h>
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-
- .data
-pal_entry_point:
- data8 ia64_pal_default_handler
- .text
-
-/*
- * Set the PAL entry point address. This could be written in C code, but we
- * do it here to keep it all in one module (besides, it's so trivial that it's
- * not a big deal).
- *
- * in0 Address of the PAL entry point (text address, NOT a function
- * descriptor).
- */
-GLOBAL_ENTRY(ia64_pal_handler_init)
- alloc r3=ar.pfs,1,0,0,0
- movl r2=pal_entry_point
- ;;
- st8 [r2]=in0
- br.ret.sptk.many rp
-END(ia64_pal_handler_init)
-
-/*
- * Default PAL call handler. This needs to be coded in assembly because it
- * uses the static calling convention, i.e., the RSE may not be used and
- * calls are done via "br.cond" (not "br.call").
- */
-GLOBAL_ENTRY(ia64_pal_default_handler)
- mov r8=-1
- br.cond.sptk.many rp
-END(ia64_pal_default_handler)
-
-/*
- * Make a PAL call using the static calling convention.
- *
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_static)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,5,0,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0
- mov r29 = in1
- mov r8 = ip
- }
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- adds r8 = 1f-1b,r8
- mov loc4=ar.rsc // save RSE configuration
- ;;
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov loc3 = psr
- mov loc0 = rp
- .body
- mov r30 = in2
-
- mov r31 = in3
- mov b7 = loc2
-
- rsm psr.i
- ;;
- mov rp = r8
- br.cond.sptk.many b7
-1: mov psr.l = loc3
- mov ar.rsc = loc4 // restore RSE configuration
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_static)
-EXPORT_SYMBOL(ia64_pal_call_static)
-
-/*
- * Make a PAL call using the stacked registers calling convention.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_stacked)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,4,4,0
- movl loc2 = pal_entry_point
-
- mov r28 = in0 // Index MUST be copied to r28
- mov out0 = in0 // AND in0 of PAL function
- mov loc0 = rp
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov out1 = in1
- mov out2 = in2
- mov out3 = in3
- mov loc3 = psr
- ;;
- rsm psr.i
- mov b7 = loc2
- ;;
- br.call.sptk.many rp=b7 // now make the call
-.ret0: mov psr.l = loc3
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_stacked)
-EXPORT_SYMBOL(ia64_pal_call_stacked)
-
-/*
- * Make a physical mode PAL call using the static registers calling convention.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- *
- * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
- * So we don't need to clear them.
- */
-#define PAL_PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PAL_PSR_BITS_TO_SET \
- (IA64_PSR_BN)
-
-
-GLOBAL_ENTRY(ia64_pal_call_phys_static)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,7,0,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0 // copy procedure index
- mov r8 = ip // save ip to compute branch
- mov loc0 = rp // save rp
- }
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov r29 = in1 // first argument
- mov r30 = in2 // copy arg2
- mov r31 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
- adds r8 = 1f-1b,r8 // calculate return address for call
- ;;
- mov loc4=ar.rsc // save RSE configuration
- dep.z loc2=loc2,0,61 // convert pal entry point to physical
- tpa r8=r8 // convert rp to physical
- ;;
- mov b7 = loc2 // install target to branch reg
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- movl r16=PAL_PSR_BITS_TO_CLEAR
- movl r17=PAL_PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17 // add in psr the bits to set
- ;;
- andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode_phys
- mov rp = r8 // install return address (physical)
- mov loc5 = r19
- mov loc6 = r20
- br.cond.sptk.many b7
-1:
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3 // r16= original psr
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
- mov psr.l = loc3 // restore init PSR
-
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_phys_static)
-EXPORT_SYMBOL(ia64_pal_call_phys_static)
-
-/*
- * Make a PAL call using the stacked registers in physical mode.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
- alloc loc1 = ar.pfs,5,7,4,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0 // copy procedure index
- mov loc0 = rp // save rp
- }
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov loc3 = psr // save psr
- ;;
- mov loc4=ar.rsc // save RSE configuration
- dep.z loc2=loc2,0,61 // convert pal entry point to physical
- ;;
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- movl r16=PAL_PSR_BITS_TO_CLEAR
- movl r17=PAL_PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17 // add in psr the bits to set
- mov b7 = loc2 // install target to branch reg
- ;;
- andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode_phys
-
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg3
- mov loc5 = r19
- mov loc6 = r20
-
- br.call.sptk.many rp=b7 // now make the call
-
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3 // r16= original psr
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-
- mov psr.l = loc3 // restore init PSR
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_phys_stacked)
-EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
-
-/*
- * Save the fp scratch regs which aren't saved in pt_regs already
- * (fp10-fp15).
- *
- * NOTE: We need to do this since firmware (SAL and PAL) may use any of the
- * scratch regs fp-low partition.
- *
- * Inputs:
- * in0 Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_save_scratch_fpregs)
- alloc r3=ar.pfs,1,0,0,0
- add r2=16,in0
- ;;
- stf.spill [in0] = f10,32
- stf.spill [r2] = f11,32
- ;;
- stf.spill [in0] = f12,32
- stf.spill [r2] = f13,32
- ;;
- stf.spill [in0] = f14,32
- stf.spill [r2] = f15,32
- br.ret.sptk.many rp
-END(ia64_save_scratch_fpregs)
-EXPORT_SYMBOL(ia64_save_scratch_fpregs)
-
-/*
- * Load the fp scratch regs (fp10-fp15)
- *
- * Inputs:
- * in0 Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_load_scratch_fpregs)
- alloc r3=ar.pfs,1,0,0,0
- add r2=16,in0
- ;;
- ldf.fill f10 = [in0],32
- ldf.fill f11 = [r2],32
- ;;
- ldf.fill f12 = [in0],32
- ldf.fill f13 = [r2],32
- ;;
- ldf.fill f14 = [in0],32
- ldf.fill f15 = [r2],32
- br.ret.sptk.many rp
-END(ia64_load_scratch_fpregs)
-EXPORT_SYMBOL(ia64_load_scratch_fpregs)
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
deleted file mode 100644
index b9ae093bfe..0000000000
--- a/arch/ia64/kernel/palinfo.c
+++ /dev/null
@@ -1,942 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * palinfo.c
- *
- * Prints processor specific information reported by PAL.
- * This code is based on the specification of PAL as of the
- * Intel IA-64 Architecture Software Developer's Manual v1.0.
- *
- *
- * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2004 Intel Corporation
- * Ashok Raj <ashok.raj@intel.com>
- *
- * 05/26/2000 S.Eranian initial release
- * 08/21/2000 S.Eranian updated to July 2000 PAL specs
- * 02/05/2001 S.Eranian fixed module support
- * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
- * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
- * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/efi.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <linux/smp.h>
-
-MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
-MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
-MODULE_LICENSE("GPL");
-
-#define PALINFO_VERSION "0.5"
-
-typedef int (*palinfo_func_t)(struct seq_file *);
-
-typedef struct {
- const char *name; /* name of the proc entry */
- palinfo_func_t proc_read; /* function to call for reading */
- struct proc_dir_entry *entry; /* registered entry (removal) */
-} palinfo_entry_t;
-
-
-/*
- * A bunch of string array to get pretty printing
- */
-
-static const char *cache_types[] = {
- "", /* not used */
- "Instruction",
- "Data",
- "Data/Instruction" /* unified */
-};
-
-static const char *cache_mattrib[]={
- "WriteThrough",
- "WriteBack",
- "", /* reserved */
- "" /* reserved */
-};
-
-static const char *cache_st_hints[]={
- "Temporal, level 1",
- "Reserved",
- "Reserved",
- "Non-temporal, all levels",
- "Reserved",
- "Reserved",
- "Reserved",
- "Reserved"
-};
-
-static const char *cache_ld_hints[]={
- "Temporal, level 1",
- "Non-temporal, level 1",
- "Reserved",
- "Non-temporal, all levels",
- "Reserved",
- "Reserved",
- "Reserved",
- "Reserved"
-};
-
-static const char *rse_hints[]={
- "enforced lazy",
- "eager stores",
- "eager loads",
- "eager loads and stores"
-};
-
-#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)
-
-static const char *mem_attrib[]={
- "WB", /* 000 */
- "SW", /* 001 */
- "010", /* 010 */
- "011", /* 011 */
- "UC", /* 100 */
- "UCE", /* 101 */
- "WC", /* 110 */
- "NaTPage" /* 111 */
-};
-
-/*
- * Take a 64-bit vector and produce a string such that
- * if bit n is set then 2^n is printed in clear text, adjusted
- * to the appropriate unit (K, M, G, T).
- *
- * Input:
- * - the seq_file to print into
- * - a 64-bit vector
- *
- */
-static void bitvector_process(struct seq_file *m, u64 vector)
-{
- int i,j;
- static const char *units[]={ "", "K", "M", "G", "T" };
-
- for (i=0, j=0; i < 64; i++ , j=i/10) {
- if (vector & 0x1)
- seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
- vector >>= 1;
- }
-}
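-
-/*
- * Worked example: a page-size vector with bits 12, 14, and 16 set prints
- * as "4K 16K 64K " -- j = i/10 selects the unit and 1 << (i - 10*j) is
- * the count in that unit.
- */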
-
-/*
- * Take a 64-bit vector and produce a string such that
- * if bit n is set then register n is present. The function
- * takes consecutive registers into account and prints out ranges.
- *
- * Input:
- * - the seq_file to print into
- * - a 64-bit vector
- *
- */
-static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
-{
- int i, begin, skip = 0;
- u64 value = reg_info[0];
-
- value >>= i = begin = ffs(value) - 1;
-
- for(; i < max; i++ ) {
-
- if (i != 0 && (i%64) == 0) value = *++reg_info;
-
- if ((value & 0x1) == 0 && skip == 0) {
- if (begin <= i - 2)
- seq_printf(m, "%d-%d ", begin, i-1);
- else
- seq_printf(m, "%d ", i-1);
- skip = 1;
- begin = -1;
- } else if ((value & 0x1) && skip == 1) {
- skip = 0;
- begin = i;
- }
- value >>=1;
- }
- if (begin > -1) {
- if (begin < 127)
- seq_printf(m, "%d-127", begin);
- else
- seq_puts(m, "127");
- }
-}
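-
-/*
- * Worked example: reg_info[] with bits 0-3 and 16 set prints as "0-3 16 "
- * -- runs of consecutive set bits collapse into ranges, isolated bits are
- * printed on their own.
- */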
-
-static int power_info(struct seq_file *m)
-{
- s64 status;
- u64 halt_info_buffer[8];
- pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
- int i;
-
- status = ia64_pal_halt_info(halt_info);
- if (status != 0) return 0;
-
- for (i=0; i < 8 ; i++ ) {
- if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
- seq_printf(m,
- "Power level %d:\n"
- "\tentry_latency : %d cycles\n"
- "\texit_latency : %d cycles\n"
- "\tpower consumption : %d mW\n"
- "\tCache+TLB coherency : %s\n", i,
- halt_info[i].pal_power_mgmt_info_s.entry_latency,
- halt_info[i].pal_power_mgmt_info_s.exit_latency,
- halt_info[i].pal_power_mgmt_info_s.power_consumption,
- halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
- } else {
- seq_printf(m,"Power level %d: not implemented\n", i);
- }
- }
- return 0;
-}
-
-static int cache_info(struct seq_file *m)
-{
- unsigned long i, levels, unique_caches;
- pal_cache_config_info_t cci;
- int j, k;
- long status;
-
- if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
- printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
- return 0;
- }
-
- seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n",
- levels, unique_caches);
-
- for (i=0; i < levels; i++) {
- for (j=2; j >0 ; j--) {
- /* even without unification, some levels may not be present */
- if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0)
- continue;
-
- seq_printf(m,
- "%s Cache level %lu:\n"
- "\tSize : %u bytes\n"
- "\tAttributes : ",
- cache_types[j+cci.pcci_unified], i+1,
- cci.pcci_cache_size);
-
- if (cci.pcci_unified)
- seq_puts(m, "Unified ");
-
- seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);
-
- seq_printf(m,
- "\tAssociativity : %d\n"
- "\tLine size : %d bytes\n"
- "\tStride : %d bytes\n",
- cci.pcci_assoc,
- 1<<cci.pcci_line_size,
- 1<<cci.pcci_stride);
- if (j == 1)
- seq_puts(m, "\tStore latency : N/A\n");
- else
- seq_printf(m, "\tStore latency : %d cycle(s)\n",
- cci.pcci_st_latency);
-
- seq_printf(m,
- "\tLoad latency : %d cycle(s)\n"
- "\tStore hints : ", cci.pcci_ld_latency);
-
- for(k=0; k < 8; k++ ) {
- if ( cci.pcci_st_hints & 0x1)
- seq_printf(m, "[%s]", cache_st_hints[k]);
- cci.pcci_st_hints >>=1;
- }
- seq_puts(m, "\n\tLoad hints : ");
-
- for(k=0; k < 8; k++ ) {
- if (cci.pcci_ld_hints & 0x1)
- seq_printf(m, "[%s]", cache_ld_hints[k]);
- cci.pcci_ld_hints >>=1;
- }
- seq_printf(m,
- "\n\tAlias boundary : %d byte(s)\n"
- "\tTag LSB : %d\n"
- "\tTag MSB : %d\n",
- 1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
- cci.pcci_tag_msb);
-
- /* when unified, data(j=2) is enough */
- if (cci.pcci_unified)
- break;
- }
- }
- return 0;
-}
-
-
-static int vm_info(struct seq_file *m)
-{
- u64 tr_pages =0, vw_pages=0, tc_pages;
- u64 attrib;
- pal_vm_info_1_u_t vm_info_1;
- pal_vm_info_2_u_t vm_info_2;
- pal_tc_info_u_t tc_info;
- ia64_ptce_info_t ptce;
- const char *sep;
- int i, j;
- long status;
-
- if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
- printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
- } else {
-
- seq_printf(m,
- "Physical Address Space : %d bits\n"
- "Virtual Address Space : %d bits\n"
- "Protection Key Registers(PKR) : %d\n"
- "Implemented bits in PKR.key : %d\n"
- "Hash Tag ID : 0x%x\n"
- "Size of RR.rid : %d\n"
- "Max Purges : ",
- vm_info_1.pal_vm_info_1_s.phys_add_size,
- vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
- vm_info_1.pal_vm_info_1_s.max_pkr+1,
- vm_info_1.pal_vm_info_1_s.key_size,
- vm_info_1.pal_vm_info_1_s.hash_tag_id,
- vm_info_2.pal_vm_info_2_s.rid_size);
- if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
- seq_puts(m, "unlimited\n");
- else
- seq_printf(m, "%d\n",
- vm_info_2.pal_vm_info_2_s.max_purges ?
- vm_info_2.pal_vm_info_2_s.max_purges : 1);
- }
-
- if (ia64_pal_mem_attrib(&attrib) == 0) {
- seq_puts(m, "Supported memory attributes : ");
- sep = "";
- for (i = 0; i < 8; i++) {
- if (attrib & (1 << i)) {
- seq_printf(m, "%s%s", sep, mem_attrib[i]);
- sep = ", ";
- }
- }
- seq_putc(m, '\n');
- }
-
- if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
- printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
- } else {
-
- seq_printf(m,
- "\nTLB walker : %simplemented\n"
- "Number of DTR : %d\n"
- "Number of ITR : %d\n"
- "TLB insertable page sizes : ",
- vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
- vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
- vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
-
- bitvector_process(m, tr_pages);
-
- seq_puts(m, "\nTLB purgeable page sizes : ");
-
- bitvector_process(m, vw_pages);
- }
-
- if ((status = ia64_get_ptce(&ptce)) != 0) {
- printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
- } else {
- seq_printf(m,
- "\nPurge base address : 0x%016lx\n"
- "Purge outer loop count : %d\n"
- "Purge inner loop count : %d\n"
- "Purge outer loop stride : %d\n"
- "Purge inner loop stride : %d\n",
- ptce.base, ptce.count[0], ptce.count[1],
- ptce.stride[0], ptce.stride[1]);
-
- seq_printf(m,
- "TC Levels : %d\n"
- "Unique TC(s) : %d\n",
- vm_info_1.pal_vm_info_1_s.num_tc_levels,
- vm_info_1.pal_vm_info_1_s.max_unique_tcs);
-
- for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
- for (j=2; j>0 ; j--) {
- tc_pages = 0; /* just in case */
-
- /* even without unification, some levels may not be present */
- if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0)
- continue;
-
- seq_printf(m,
- "\n%s Translation Cache Level %d:\n"
- "\tHash sets : %d\n"
- "\tAssociativity : %d\n"
- "\tNumber of entries : %d\n"
- "\tFlags : ",
- cache_types[j+tc_info.tc_unified], i+1,
- tc_info.tc_num_sets,
- tc_info.tc_associativity,
- tc_info.tc_num_entries);
-
- if (tc_info.tc_pf)
- seq_puts(m, "PreferredPageSizeOptimized ");
- if (tc_info.tc_unified)
- seq_puts(m, "Unified ");
- if (tc_info.tc_reduce_tr)
- seq_puts(m, "TCReduction");
-
- seq_puts(m, "\n\tSupported page sizes: ");
-
- bitvector_process(m, tc_pages);
-
- /* when unified, data (j=2) is enough */
- if (tc_info.tc_unified)
- break;
- }
- }
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-
-static int register_info(struct seq_file *m)
-{
- u64 reg_info[2];
- u64 info;
- unsigned long phys_stacked;
- pal_hints_u_t hints;
- unsigned long iregs, dregs;
- static const char * const info_type[] = {
- "Implemented AR(s)",
- "AR(s) with read side-effects",
- "Implemented CR(s)",
- "CR(s) with read side-effects",
- };
-
- for(info=0; info < 4; info++) {
- if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0)
- return 0;
- seq_printf(m, "%-32s : ", info_type[info]);
- bitregister_process(m, reg_info, 128);
- seq_putc(m, '\n');
- }
-
- if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
- seq_printf(m,
- "RSE stacked physical registers : %ld\n"
- "RSE load/store hints : %ld (%s)\n",
- phys_stacked, hints.ph_data,
- hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
-
- if (ia64_pal_debug_info(&iregs, &dregs))
- return 0;
-
- seq_printf(m,
- "Instruction debug register pairs : %ld\n"
- "Data debug register pairs : %ld\n", iregs, dregs);
-
- return 0;
-}
-
-static const char *const proc_features_0[]={ /* Feature set 0 */
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
- "Unimplemented instruction address fault",
- "INIT, PMI, and LINT pins",
- "Simple unimplemented instr addresses",
- "Variable P-state performance",
- "Virtual machine features implemented",
- "XIP,XPSR,XFS implemented",
- "XR1-XR3 implemented",
- "Disable dynamic predicate prediction",
- "Disable processor physical number",
- "Disable dynamic data cache prefetch",
- "Disable dynamic inst cache prefetch",
- "Disable dynamic branch prediction",
- NULL, NULL, NULL, NULL,
- "Disable P-states",
- "Enable MCA on Data Poisoning",
- "Enable vmsw instruction",
- "Enable extern environmental notification",
- "Disable BINIT on processor time-out",
- "Disable dynamic power management (DPM)",
- "Disable coherency",
- "Disable cache",
- "Enable CMCI promotion",
- "Enable MCA to BINIT promotion",
- "Enable MCA promotion",
- "Enable BERR promotion"
-};
-
-static const char *const proc_features_16[]={ /* Feature set 16 */
- "Disable ETM",
- "Enable ETM",
- "Enable MCA on half-way timer",
- "Enable snoop WC",
- NULL,
- "Enable Fast Deferral",
- "Disable MCA on memory aliasing",
- "Enable RSB",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- "DP system processor",
- "Low Voltage",
- "HT supported",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL
-};
-
-static const char *const *const proc_features[]={
- proc_features_0,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- proc_features_16,
- NULL, NULL, NULL, NULL,
-};
-
-static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
- unsigned long set)
-{
- const char *const *vf, *const *v;
- int i;
-
- vf = v = proc_features[set];
- for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {
-
- if (!(control)) /* No remaining bits set */
- break;
- if (!(avail & 0x1)) /* Print only bits that are available */
- continue;
- if (vf)
- v = vf + i;
- if ( v && *v ) {
- seq_printf(m, "%-40s : %s %s\n", *v,
- avail & 0x1 ? (status & 0x1 ?
- "On " : "Off"): "",
- avail & 0x1 ? (control & 0x1 ?
- "Ctrl" : "NoCtrl"): "");
- } else {
- seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
- " : %s %s\n",
- set, i,
- avail & 0x1 ? (status & 0x1 ?
- "On " : "Off"): "",
- avail & 0x1 ? (control & 0x1 ?
- "Ctrl" : "NoCtrl"): "");
- }
- }
-}
-
-static int processor_info(struct seq_file *m)
-{
- u64 avail=1, status=1, control=1, feature_set=0;
- s64 ret;
-
- do {
- ret = ia64_pal_proc_get_features(&avail, &status, &control,
- feature_set);
- if (ret < 0)
- return 0;
-
- if (ret == 1) {
- feature_set++;
- continue;
- }
-
- feature_set_info(m, avail, status, control, feature_set);
- feature_set++;
- } while(1);
-
- return 0;
-}
-
-static const char *const bus_features[]={
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- NULL,NULL,
- "Request Bus Parking",
- "Bus Lock Mask",
- "Enable Half Transfer",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "Enable Cache Line Repl. Shared",
- "Enable Cache Line Repl. Exclusive",
- "Disable Transaction Queuing",
- "Disable Response Error Checking",
- "Disable Bus Error Checking",
- "Disable Bus Requester Internal Error Signalling",
- "Disable Bus Requester Error Signalling",
- "Disable Bus Initialization Event Checking",
- "Disable Bus Initialization Event Signalling",
- "Disable Bus Address Error Checking",
- "Disable Bus Address Error Signalling",
- "Disable Bus Data Error Checking"
-};
-
-
-static int bus_info(struct seq_file *m)
-{
- const char *const *v = bus_features;
- pal_bus_features_u_t av, st, ct;
- u64 avail, status, control;
- int i;
- s64 ret;
-
- if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
- return 0;
-
- avail = av.pal_bus_features_val;
- status = st.pal_bus_features_val;
- control = ct.pal_bus_features_val;
-
- for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
- if ( ! *v )
- continue;
- seq_printf(m, "%-48s : %s%s %s\n", *v,
- avail & 0x1 ? "" : "NotImpl",
- avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
- avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
- }
- return 0;
-}
-
-static int version_info(struct seq_file *m)
-{
- pal_version_u_t min_ver, cur_ver;
-
- if (ia64_pal_version(&min_ver, &cur_ver) != 0)
- return 0;
-
- seq_printf(m,
- "PAL_vendor : 0x%02x (min=0x%02x)\n"
- "PAL_A : %02x.%02x (min=%02x.%02x)\n"
- "PAL_B : %02x.%02x (min=%02x.%02x)\n",
- cur_ver.pal_version_s.pv_pal_vendor,
- min_ver.pal_version_s.pv_pal_vendor,
- cur_ver.pal_version_s.pv_pal_a_model,
- cur_ver.pal_version_s.pv_pal_a_rev,
- min_ver.pal_version_s.pv_pal_a_model,
- min_ver.pal_version_s.pv_pal_a_rev,
- cur_ver.pal_version_s.pv_pal_b_model,
- cur_ver.pal_version_s.pv_pal_b_rev,
- min_ver.pal_version_s.pv_pal_b_model,
- min_ver.pal_version_s.pv_pal_b_rev);
- return 0;
-}
-
-static int frequency_info(struct seq_file *m)
-{
- struct pal_freq_ratio proc, itc, bus;
- unsigned long base;
-
- if (ia64_pal_freq_base(&base) == -1)
- seq_puts(m, "Output clock : not implemented\n");
- else
- seq_printf(m, "Output clock : %ld ticks/s\n", base);
-
- if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
-
- seq_printf(m,
- "Processor/Clock ratio : %d/%d\n"
- "Bus/Clock ratio : %d/%d\n"
- "ITC/Clock ratio : %d/%d\n",
- proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
- return 0;
-}
-
-static int tr_info(struct seq_file *m)
-{
- long status;
- pal_tr_valid_u_t tr_valid;
- u64 tr_buffer[4];
- pal_vm_info_1_u_t vm_info_1;
- pal_vm_info_2_u_t vm_info_2;
- unsigned long i, j;
- unsigned long max[3], pgm;
- struct ifa_reg {
- unsigned long valid:1;
- unsigned long ig:11;
- unsigned long vpn:52;
- } *ifa_reg;
- struct itir_reg {
- unsigned long rv1:2;
- unsigned long ps:6;
- unsigned long key:24;
- unsigned long rv2:32;
- } *itir_reg;
- struct gr_reg {
- unsigned long p:1;
- unsigned long rv1:1;
- unsigned long ma:3;
- unsigned long a:1;
- unsigned long d:1;
- unsigned long pl:2;
- unsigned long ar:3;
- unsigned long ppn:38;
- unsigned long rv2:2;
- unsigned long ed:1;
- unsigned long ig:11;
- } *gr_reg;
- struct rid_reg {
- unsigned long ig1:1;
- unsigned long rv1:1;
- unsigned long ig2:6;
- unsigned long rid:24;
- unsigned long rv2:32;
- } *rid_reg;
-
- if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
- printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
- return 0;
- }
- max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
- max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
-
- for (i=0; i < 2; i++ ) {
- for (j=0; j < max[i]; j++) {
-
- status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
- if (status != 0) {
- printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
- i, j, status);
- continue;
- }
-
- ifa_reg = (struct ifa_reg *)&tr_buffer[2];
-
- if (ifa_reg->valid == 0)
- continue;
-
- gr_reg = (struct gr_reg *)tr_buffer;
- itir_reg = (struct itir_reg *)&tr_buffer[1];
- rid_reg = (struct rid_reg *)&tr_buffer[3];
-
- pgm = -1 << (itir_reg->ps - 12);
- seq_printf(m,
- "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
- "\tppn : 0x%lx\n"
- "\tvpn : 0x%lx\n"
- "\tps : ",
- "ID"[i], j,
- tr_valid.pal_tr_valid_s.access_rights_valid,
- tr_valid.pal_tr_valid_s.priv_level_valid,
- tr_valid.pal_tr_valid_s.dirty_bit_valid,
- tr_valid.pal_tr_valid_s.mem_attr_valid,
- (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);
-
- bitvector_process(m, 1<< itir_reg->ps);
-
- seq_printf(m,
- "\n\tpl : %d\n"
- "\tar : %d\n"
- "\trid : %x\n"
- "\tp : %d\n"
- "\tma : %d\n"
- "\td : %d\n",
- gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
- gr_reg->d);
- }
- }
- return 0;
-}
-
-
-
-/*
- * List {name,function} pairs for every entry in /proc/pal/cpu*
- */
-static const palinfo_entry_t palinfo_entries[]={
- { "version_info", version_info, },
- { "vm_info", vm_info, },
- { "cache_info", cache_info, },
- { "power_info", power_info, },
- { "register_info", register_info, },
- { "processor_info", processor_info, },
- { "frequency_info", frequency_info, },
- { "bus_info", bus_info },
- { "tr_info", tr_info, }
-};
-
-#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
-
-static struct proc_dir_entry *palinfo_dir;
-
-/*
- * This data structure is used to pass which cpu/function is being requested.
- * It must fit in a 64-bit quantity to be passed to the proc callback routine.
- *
- * In SMP mode, when we get a request for another CPU, we must call that
- * other CPU using an IPI and wait for the result before returning.
- */
-typedef union {
- u64 value;
- struct {
- unsigned req_cpu: 32; /* for which CPU this info is */
- unsigned func_id: 32; /* which function is requested */
- } pal_func_cpu;
-} pal_func_cpu_u_t;
-
-#define req_cpu pal_func_cpu.req_cpu
-#define func_id pal_func_cpu.func_id
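-
-/*
- * For example, palinfo_add_proc() below packs both selectors into the
- * single 64-bit cookie handed to proc_create_single_data():
- *
- *	pal_func_cpu_u_t f;
- *
- *	f.req_cpu = cpu;	(which CPU to interrogate)
- *	f.func_id = j;		(index into palinfo_entries[])
- *	proc_create_single_data(..., (void *) f.value);
- */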
-
-#ifdef CONFIG_SMP
-
-/*
- * used to hold information about final function to call
- */
-typedef struct {
- palinfo_func_t func; /* pointer to function to call */
- struct seq_file *m; /* buffer to store results */
- int ret; /* return value from call */
-} palinfo_smp_data_t;
-
-
-/*
- * This function does the actual final call and is called
- * from the SMP code, i.e., this is the palinfo callback routine.
- */
-static void
-palinfo_smp_call(void *info)
-{
- palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
- data->ret = (*data->func)(data->m);
-}
-
-/*
- * Function called to trigger the IPI when we need to access a remote CPU.
- * Return:
- * 0 : error or nothing to output
- * otherwise the return value of the remote proc_read callback
- */
-static
-int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
-{
- palinfo_smp_data_t ptr;
- int ret;
-
- ptr.func = palinfo_entries[f->func_id].proc_read;
- ptr.m = m;
- ptr.ret = 0; /* just in case */
-
-
- /* will send IPI to other CPU and wait for completion of remote call */
- if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
- printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
- "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
- return 0;
- }
- return ptr.ret;
-}
-#else /* ! CONFIG_SMP */
-static
-int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
-{
- printk(KERN_ERR "palinfo: should not be called with a non-SMP kernel\n");
- return 0;
-}
-#endif /* CONFIG_SMP */
-
-/*
- * Entry point routine: all calls go through this function
- */
-static int proc_palinfo_show(struct seq_file *m, void *v)
-{
- pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;
-
- /*
- * In SMP mode, we may need to call another CPU to get correct
- * information. PAL, by definition, is processor-specific.
- */
- if (f->req_cpu == get_cpu())
- (*palinfo_entries[f->func_id].proc_read)(m);
- else
- palinfo_handle_smp(m, f);
-
- put_cpu();
- return 0;
-}
-
-static int palinfo_add_proc(unsigned int cpu)
-{
- pal_func_cpu_u_t f;
- struct proc_dir_entry *cpu_dir;
- int j;
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
- sprintf(cpustr, "cpu%d", cpu);
-
- cpu_dir = proc_mkdir(cpustr, palinfo_dir);
- if (!cpu_dir)
- return -EINVAL;
-
- f.req_cpu = cpu;
-
- for (j=0; j < NR_PALINFO_ENTRIES; j++) {
- f.func_id = j;
- proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir,
- proc_palinfo_show, (void *)f.value);
- }
- return 0;
-}
-
-static int palinfo_del_proc(unsigned int hcpu)
-{
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
-
- sprintf(cpustr, "cpu%d", hcpu);
- remove_proc_subtree(cpustr, palinfo_dir);
- return 0;
-}
-
-static enum cpuhp_state hp_online;
-
-static int __init palinfo_init(void)
-{
- int i = 0;
-
- printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
- palinfo_dir = proc_mkdir("pal", NULL);
- if (!palinfo_dir)
- return -ENOMEM;
-
- i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
- palinfo_add_proc, palinfo_del_proc);
- if (i < 0) {
- remove_proc_subtree("pal", NULL);
- return i;
- }
- hp_online = i;
- return 0;
-}
-
-static void __exit palinfo_exit(void)
-{
- cpuhp_remove_state(hp_online);
- remove_proc_subtree("pal", NULL);
-}
-
-module_init(palinfo_init);
-module_exit(palinfo_exit);
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
deleted file mode 100644
index 7f21a8c57e..0000000000
--- a/arch/ia64/kernel/patch.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Instruction-patching support.
- *
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/patch.h>
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/unistd.h>
-
-/*
- * This was adapted from code written by Tony Luck:
- *
- * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
- * like this:
- *
- * 6 6 5 4 3 2 1
- * 3210987654321098765432109876543210987654321098765432109876543210
- * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
- *
- * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
- */
-static u64
-get_imm64 (u64 insn_addr)
-{
- u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */
-
- return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/
- ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
- ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
- ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
- ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
- ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
- ((p[1] & 0x000007f000000000UL) >> 36); /*G*/
-}
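-
-/*
- * Reading one term of the diagram: value bit 63 ("A") is stored at bit 59
- * of the second bundle word, so (p[1] & 0x0800000000000000UL) << 4 moves
- * it back into place; the remaining six terms follow the same pattern.
- */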
-
-/* Patch instruction with "val" where "mask" has 1 bits. */
-void
-ia64_patch (u64 insn_addr, u64 mask, u64 val)
-{
- u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
-# define insn_mask ((1UL << 41) - 1)
- unsigned long shift;
-
- b0 = b[0]; b1 = b[1];
- shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
- if (shift >= 64) {
- m1 = mask << (shift - 64);
- v1 = val << (shift - 64);
- } else {
- m0 = mask << shift; m1 = mask >> (64 - shift);
- v0 = val << shift; v1 = val >> (64 - shift);
- b[0] = (b0 & ~m0) | (v0 & m0);
- }
- b[1] = (b1 & ~m1) | (v1 & m1);
-}
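-
-/*
- * The slot arithmetic, worked through: a bundle is two 64-bit words holding
- * a 5-bit template plus three 41-bit instruction slots, and insn_addr % 16
- * encodes the slot number:
- *
- *	slot 0: shift =  5 -> bits  5..45 of b[0]
- *	slot 1: shift = 46 -> bits 46..63 of b[0] and bits 0..22 of b[1]
- *	slot 2: shift = 87 -> bits 23..63 of b[1] only
- *
- * which is why the shift >= 64 case leaves b[0] untouched.
- */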
-
-void
-ia64_patch_imm64 (u64 insn_addr, u64 val)
-{
- /* The assembler may generate an offset pointing to either slot 1
- or slot 2 for a long (2-slot) instruction, occupying slots 1
- and 2. */
- insn_addr &= -16UL;
- ia64_patch(insn_addr + 2,
- 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
- | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */
- | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */
- | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
- | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */));
- ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
-}
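
get_imm64() above and ia64_patch_imm64() here are exact inverses, so the bit arithmetic can be verified on any host without an ia64 in sight. A standalone sketch (plain C test harness, not kernel code) that scatters a value into a fake two-word bundle with the same masks and shifts, then gathers it back:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* write the low bits selected by mask into 41-bit slot 0..2 of bundle b[2] */
static void patch(uint64_t b[2], unsigned slot, uint64_t mask, uint64_t val)
{
	unsigned shift = 5 + 41 * slot;	/* 5-bit template, then 3 x 41-bit slots */

	val &= mask;
	if (shift >= 64) {
		b[1] = (b[1] & ~(mask << (shift - 64))) | (val << (shift - 64));
	} else {
		b[0] = (b[0] & ~(mask << shift)) | (val << shift);
		b[1] = (b[1] & ~(mask >> (64 - shift))) | (val >> (64 - shift));
	}
}

/* scatter val exactly like ia64_patch_imm64() above */
static void patch_imm64(uint64_t b[2], uint64_t val)
{
	patch(b, 2, 0x01fffefe000ULL,
	      ((val & 0x8000000000000000ULL) >> 27)	/* bit 63 -> 36 */
	    | ((val & 0x0000000000200000ULL) <<  0)	/* bit 21 -> 21 */
	    | ((val & 0x00000000001f0000ULL) <<  6)	/* bit 16 -> 22 */
	    | ((val & 0x000000000000ff80ULL) << 20)	/* bit  7 -> 27 */
	    | ((val & 0x000000000000007fULL) << 13));	/* bit  0 -> 13 */
	patch(b, 1, 0x1ffffffffffULL, val >> 22);	/* bits 22..62 */
}

/* gather it back exactly like get_imm64() above */
static uint64_t get_imm64(const uint64_t *p)
{
	return ((p[1] & 0x0800000000000000ULL) <<  4) |	/* A */
	       ((p[1] & 0x00000000007fffffULL) << 40) |	/* B */
	       ((p[0] & 0xffffc00000000000ULL) >> 24) |	/* C */
	       ((p[1] & 0x0000100000000000ULL) >> 23) |	/* D */
	       ((p[1] & 0x0003e00000000000ULL) >> 29) |	/* E */
	       ((p[1] & 0x07fc000000000000ULL) >> 43) |	/* F */
	       ((p[1] & 0x000007f000000000ULL) >> 36);	/* G */
}

int main(void)
{
	uint64_t vals[] = { 0, 1, 0xdeadbeefcafef00dULL, ~0ULL };

	for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		uint64_t b[2] = { 0, 0 };

		patch_imm64(b, vals[i]);
		assert(get_imm64(b) == vals[i]);
	}
	puts("movl imm64 scatter/gather round-trip ok");
	return 0;
}
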
-
-void
-ia64_patch_imm60 (u64 insn_addr, u64 val)
-{
-	/* The assembler may generate an offset pointing to either slot 1
-	   or slot 2 of a long (2-slot) instruction, which occupies slots 1
-	   and 2. */
- insn_addr &= -16UL;
- ia64_patch(insn_addr + 2,
- 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
- | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
- ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
-}
-
-/*
- * We sometimes need to load the physical address of a kernel
- * object. Often we can convert the virtual address to a physical
- * one at execution time, but sometimes (either for performance
- * reasons or during error recovery) we cannot do this. Patch the marked
- * bundles to load the physical address.
- */
-void __init
-ia64_patch_vtop (unsigned long start, unsigned long end)
-{
- s32 *offp = (s32 *) start;
- u64 ip;
-
- while (offp < (s32 *) end) {
- ip = (u64) offp + *offp;
-
- /* replace virtual address with corresponding physical address: */
- ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
- ia64_fc((void *) ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
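
Each patch list walked here is an array of s32 entries, and every entry is a self-relative offset: the patch target is the entry's own address plus its value (ip = (u64) offp + *offp). This keeps the tables position-independent and half the size of absolute 64-bit pointers, at the price of requiring targets within +/-2 GiB of the table. A host-side sketch of the convention (hypothetical names, no ia64 required):

#include <assert.h>
#include <stdint.h>

static uint64_t insns[4];	/* stand-ins for patchable instruction words */
static int32_t patchlist[4];	/* self-relative offsets, as in the kernel tables */

int main(void)
{
	/* build the table: entry i points at insns[i] */
	for (int i = 0; i < 4; i++)
		patchlist[i] = (int32_t)((intptr_t)&insns[i] -
					 (intptr_t)&patchlist[i]);

	/* walk it the way ia64_patch_vtop()/ia64_patch_rse() do */
	for (int32_t *offp = patchlist; offp < patchlist + 4; ++offp) {
		uint64_t ip = (uint64_t)offp + *offp;

		assert(ip == (uint64_t)&insns[offp - patchlist]);
		*(uint64_t *)ip = 0x42;	/* "patch" the target word */
	}
	return 0;
}
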
-
-/*
- * Disable the RSE workaround by turning each conditional branch
- * that we tagged where the workaround was used into an
- * unconditional branch.
- */
-void __init
-ia64_patch_rse (unsigned long start, unsigned long end)
-{
- s32 *offp = (s32 *) start;
- u64 ip, *b;
-
- while (offp < (s32 *) end) {
- ip = (u64) offp + *offp;
-
- b = (u64 *)(ip & -16);
- b[1] &= ~0xf800000L;
- ia64_fc((void *) ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-void __init
-ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
-{
- static int first_time = 1;
- int need_workaround;
- s32 *offp = (s32 *) start;
- u64 *wp;
-
- need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);
-
- if (first_time) {
- first_time = 0;
- if (need_workaround)
- printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
- }
- if (need_workaround)
- return;
-
- while (offp < (s32 *) end) {
- wp = (u64 *) ia64_imva((char *) offp + *offp);
- wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
- wp[1] = 0x0084006880000200UL;
- wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
- wp[3] = 0x0004000000000200UL;
- ia64_fc(wp); ia64_fc(wp + 2);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-static void __init
-patch_fsyscall_table (unsigned long start, unsigned long end)
-{
- extern unsigned long fsyscall_table[NR_syscalls];
- s32 *offp = (s32 *) start;
- u64 ip;
-
- while (offp < (s32 *) end) {
- ip = (u64) ia64_imva((char *) offp + *offp);
- ia64_patch_imm64(ip, (u64) fsyscall_table);
- ia64_fc((void *) ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-static void __init
-patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
-{
- extern char fsys_bubble_down[];
- s32 *offp = (s32 *) start;
- u64 ip;
-
- while (offp < (s32 *) end) {
- ip = (u64) offp + *offp;
- ia64_patch_imm60((u64) ia64_imva((void *) ip),
- (u64) (fsys_bubble_down - (ip & -16)) / 16);
- ia64_fc((void *) ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-void __init
-ia64_patch_gate (void)
-{
-# define START(name) ((unsigned long) __start_gate_##name##_patchlist)
-# define END(name) ((unsigned long)__end_gate_##name##_patchlist)
-
- patch_fsyscall_table(START(fsyscall), END(fsyscall));
- patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
- ia64_patch_vtop(START(vtop), END(vtop));
- ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
-}
-
-void ia64_patch_phys_stack_reg(unsigned long val)
-{
- s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
- s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
- u64 ip, mask, imm;
-
- /* see instruction format A4: adds r1 = imm13, r3 */
- mask = (0x3fUL << 27) | (0x7f << 13);
- imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;
-
- while (offp < end) {
- ip = (u64) offp + *offp;
- ia64_patch(ip, mask, imm);
- ia64_fc((void *)ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
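
The mask and imm computed above follow instruction format A4 (adds with an immediate): the low 7 bits of the value go into a 7-bit field at slot bits 13..19 and the next 6 bits into a 6-bit field at slot bits 27..32, which covers the 13-bit quantity ia64_patch_phys_stack_reg() needs. A quick host-side round-trip check of that split (standalone test, not kernel code):

#include <assert.h>
#include <stdint.h>

/* place val's 13 immediate bits into the A4 fields, as done above */
static uint64_t a4_imm(uint64_t val)
{
	return (((val >> 7) & 0x3f) << 27) | ((val & 0x7f) << 13);
}

/* pull them back out */
static uint64_t a4_imm_decode(uint64_t insn)
{
	return (((insn >> 27) & 0x3f) << 7) | ((insn >> 13) & 0x7f);
}

int main(void)
{
	for (uint64_t v = 0; v < (1u << 13); v++)
		assert(a4_imm_decode(a4_imm(v)) == v);
	return 0;
}
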
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
deleted file mode 100644
index c90221733c..0000000000
--- a/arch/ia64/kernel/pci-dma.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Dynamic DMA mapping support.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/dmar.h>
-#include <asm/iommu.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <asm/page.h>
-
-int no_iommu __read_mostly;
-#ifdef CONFIG_IOMMU_DEBUG
-int force_iommu __read_mostly = 1;
-#else
-int force_iommu __read_mostly;
-#endif
-
-static int __init pci_iommu_init(void)
-{
- if (iommu_detected)
- intel_iommu_init();
-
- return 0;
-}
-
-/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
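
fs_initcall() registers pci_iommu_init() at a later initcall level than subsys_initcall(), where bus subsystems such as PCI register, which is what the "must execute after PCI subsystem" comment relies on. A minimal sketch of picking an initcall level for ordering; this assumes the conventional level order (pure, core, postcore, arch, subsys, fs, rootfs, device, late):

#include <linux/init.h>

static int __init after_pci_init(void)
{
	/* safe to touch the PCI core here: it registered at subsys_initcall */
	return 0;
}
/* fs_initcall runs after all subsys_initcalls, hence after PCI registration */
fs_initcall(after_pci_init);
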
diff --git a/arch/ia64/kernel/perfmon_itanium.h b/arch/ia64/kernel/perfmon_itanium.h
deleted file mode 100644
index dbd04028aa..0000000000
--- a/arch/ia64/kernel/perfmon_itanium.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the Itanium PMU register description tables
- * and pmc checker.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0000000010000000UL, -1UL, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc13 */ { PFM_REG_CONFIG , 0, 0x0003ffff00000001UL, -1UL, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd1 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd2 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd3 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
-/* pmd8 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd9 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd10 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd11 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd12 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd13 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd14 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd15 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd16 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd17 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static int
-pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret;
- int is_loaded;
-
-	/* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the (instruction) debug registers if pmc13.ta bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 13 && is_loaded && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc13.ta cleared, clearing ibr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
-
- /*
- * we must clear the (data) debug registers if pmc11.pt bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 11 && is_loaded && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc11.pt cleared, clearing dbr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- return 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_ita={
- .pmu_name = "Itanium",
- .pmu_family = 0x7,
- .ovfl_val = (1UL << 32) - 1,
- .pmd_desc = pfm_ita_pmd_desc,
- .pmc_desc = pfm_ita_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
-	.use_rr_dbregs = 1, /* debug registers are used for range restrictions */
-};
-
-
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
deleted file mode 100644
index 9a5cd9fad3..0000000000
--- a/arch/ia64/kernel/process.c
+++ /dev/null
@@ -1,611 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific setup.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
- *
- * 2005-10-07 Keith Owens <kaos@sgi.com>
- * Add notify_die() hooks.
- */
-#include <linux/cpu.h>
-#include <linux/pm.h>
-#include <linux/elf.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/personality.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/hotplug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/stddef.h>
-#include <linux/thread_info.h>
-#include <linux/unistd.h>
-#include <linux/efi.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/kdebug.h>
-#include <linux/utsname.h>
-#include <linux/resume_user_mode.h>
-#include <linux/rcupdate.h>
-
-#include <asm/cpu.h>
-#include <asm/delay.h>
-#include <asm/elf.h>
-#include <asm/irq.h>
-#include <asm/kexec.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/switch_to.h>
-#include <asm/tlbflush.h>
-#include <linux/uaccess.h>
-#include <asm/unwind.h>
-#include <asm/user.h>
-#include <asm/xtp.h>
-
-#include "entry.h"
-
-#include "sigframe.h"
-
-void (*ia64_mark_idle)(int);
-
-unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
-EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_power_off) (void);
-EXPORT_SYMBOL(pm_power_off);
-
-static void
-ia64_do_show_stack (struct unw_frame_info *info, void *arg)
-{
- unsigned long ip, sp, bsp;
- const char *loglvl = arg;
-
- printk("%s\nCall Trace:\n", loglvl);
- do {
- unw_get_ip(info, &ip);
- if (ip == 0)
- break;
-
- unw_get_sp(info, &sp);
- unw_get_bsp(info, &bsp);
- printk("%s [<%016lx>] %pS\n"
- " sp=%016lx bsp=%016lx\n",
- loglvl, ip, (void *)ip, sp, bsp);
- } while (unw_unwind(info) >= 0);
-}
-
-void
-show_stack (struct task_struct *task, unsigned long *sp, const char *loglvl)
-{
- if (!task)
- unw_init_running(ia64_do_show_stack, (void *)loglvl);
- else {
- struct unw_frame_info info;
-
- unw_init_from_blocked_task(&info, task);
- ia64_do_show_stack(&info, (void *)loglvl);
- }
-}
-
-void
-show_regs (struct pt_regs *regs)
-{
- unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
-
- print_modules();
- printk("\n");
- show_regs_print_info(KERN_DEFAULT);
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
- regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
- init_utsname()->release);
- printk("ip is at %pS\n", (void *)ip);
- printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
- regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
- printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
- regs->ar_rnat, regs->ar_bspstore, regs->pr);
- printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
- regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
- printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
- printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
- printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
- regs->f6.u.bits[1], regs->f6.u.bits[0],
- regs->f7.u.bits[1], regs->f7.u.bits[0]);
- printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
- regs->f8.u.bits[1], regs->f8.u.bits[0],
- regs->f9.u.bits[1], regs->f9.u.bits[0]);
- printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
- regs->f10.u.bits[1], regs->f10.u.bits[0],
- regs->f11.u.bits[1], regs->f11.u.bits[0]);
-
- printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
- printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
- printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
- printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
- printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
- printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
- printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
- printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
- printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
-
- if (user_mode(regs)) {
- /* print the stacked registers */
- unsigned long val, *bsp, ndirty;
- int i, sof, is_nat = 0;
-
- sof = regs->cr_ifs & 0x7f; /* size of frame */
- ndirty = (regs->loadrs >> 19);
- bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
- for (i = 0; i < sof; ++i) {
- get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
- printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
- ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
- }
- } else
- show_stack(NULL, NULL, KERN_DEFAULT);
-}
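
show_regs() above steps through the user's stacked registers with ia64_rse_skip_regs(), which must hop over the NaT-collection word that the RSE interleaves into the backing store at every 64th slot (slot number 63, i.e. address bits 3..8 all ones). A host-side model of that arithmetic, written to mirror the helpers in asm/rse.h (a standalone sketch, not the kernel header):

#include <assert.h>
#include <stdint.h>

/* slot number (0..63) of a backing-store word; slot 63 holds NaT bits */
static unsigned long rse_slot_num(unsigned long *addr)
{
	return ((unsigned long)addr >> 3) & 0x3f;
}

/* address of the NaT-collection word covering slot_addr */
static unsigned long *rse_rnat_addr(unsigned long *slot_addr)
{
	return (unsigned long *)((unsigned long)slot_addr | (0x3fUL << 3));
}

/* skip num_regs register slots, stepping over interleaved NaT words */
static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
	long delta = rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta / 0x3f;
}

int main(void)
{
	unsigned long *bs = (unsigned long *)0x10000;	/* fake, slot-0 aligned */

	/* 63 register slots fit per group; the 64th word is the NaT slot */
	assert(rse_skip_regs(bs, 62) == bs + 62);
	assert(rse_skip_regs(bs, 63) == bs + 64);	/* skipped the NaT word */
	assert(rse_skip_regs(bs + 64, -63) == bs);	/* and backwards too */
	assert(rse_slot_num(rse_rnat_addr(bs)) == 0x3f);
	return 0;
}
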
-
-/* local support for deprecated console_print */
-void
-console_print(const char *s)
-{
- printk(KERN_EMERG "%s", s);
-}
-
-void
-do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
-{
- if (fsys_mode(current, &scr->pt)) {
- /*
- * defer signal-handling etc. until we return to
- * privilege-level 0.
- */
- if (!ia64_psr(&scr->pt)->lp)
- ia64_psr(&scr->pt)->lp = 1;
- return;
- }
-
- /* deal with pending signal delivery */
- if (test_thread_flag(TIF_SIGPENDING) ||
- test_thread_flag(TIF_NOTIFY_SIGNAL)) {
- local_irq_enable(); /* force interrupt enable */
- ia64_do_signal(scr, in_syscall);
- }
-
- if (test_thread_flag(TIF_NOTIFY_RESUME)) {
- local_irq_enable(); /* force interrupt enable */
- resume_user_mode_work(&scr->pt);
- }
-
- /* copy user rbs to kernel rbs */
- if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
- local_irq_enable(); /* force interrupt enable */
- ia64_sync_krbs();
- }
-
- local_irq_disable(); /* force interrupt disable */
-}
-
-static int __init nohalt_setup(char * str)
-{
- cpu_idle_poll_ctrl(true);
- return 1;
-}
-__setup("nohalt", nohalt_setup);
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* We don't actually take the CPU down, just spin without interrupts. */
-static inline void __noreturn play_dead(void)
-{
- unsigned int this_cpu = smp_processor_id();
-
- /* Ack it */
- __this_cpu_write(cpu_state, CPU_DEAD);
-
- max_xtp();
- local_irq_disable();
- idle_task_exit();
- ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
- /*
-	 * The above is a point of no return; the processor is
-	 * expected to be in the SAL loop now.
- */
- BUG();
-}
-#else
-static inline void __noreturn play_dead(void)
-{
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void __noreturn arch_cpu_idle_dead(void)
-{
- play_dead();
-}
-
-void arch_cpu_idle(void)
-{
- void (*mark_idle)(int) = ia64_mark_idle;
-
-#ifdef CONFIG_SMP
- min_xtp();
-#endif
- rmb();
- if (mark_idle)
- (*mark_idle)(1);
-
- raw_safe_halt();
- raw_local_irq_disable();
-
- if (mark_idle)
- (*mark_idle)(0);
-#ifdef CONFIG_SMP
- normal_xtp();
-#endif
-}
-
-void
-ia64_save_extra (struct task_struct *task)
-{
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
- ia64_save_debug_regs(&task->thread.dbr[0]);
-}
-
-void
-ia64_load_extra (struct task_struct *task)
-{
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
- ia64_load_debug_regs(&task->thread.dbr[0]);
-}
-
-/*
- * Copy the state of an ia-64 thread.
- *
- * We get here through the following call chain:
- *
- * from user-level: from kernel:
- *
- * <clone syscall> <some kernel call frames>
- * sys_clone :
- * kernel_clone kernel_clone
- * copy_thread copy_thread
- *
- * This means that the stack layout is as follows:
- *
- * +---------------------+ (highest addr)
- * | struct pt_regs |
- * +---------------------+
- * | struct switch_stack |
- * +---------------------+
- * | |
- * | memory stack |
- * | | <-- sp (lowest addr)
- * +---------------------+
- *
- * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an
- * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
- * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the
- * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since
- * the stack is page aligned and the page size is at least 4KB, this is always the case,
- * so there is nothing to worry about.
- */
-int
-copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
-{
- unsigned long clone_flags = args->flags;
- unsigned long user_stack_base = args->stack;
- unsigned long user_stack_size = args->stack_size;
- unsigned long tls = args->tls;
- extern char ia64_ret_from_clone;
- struct switch_stack *child_stack, *stack;
- unsigned long rbs, child_rbs, rbs_size;
- struct pt_regs *child_ptregs;
- struct pt_regs *regs = current_pt_regs();
- int retval = 0;
-
- child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
- child_stack = (struct switch_stack *) child_ptregs - 1;
-
- rbs = (unsigned long) current + IA64_RBS_OFFSET;
- child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
-
- /* copy parts of thread_struct: */
- p->thread.ksp = (unsigned long) child_stack - 16;
-
- /*
- * NOTE: The calling convention considers all floating point
- * registers in the high partition (fph) to be scratch. Since
- * the only way to get to this point is through a system call,
- * we know that the values in fph are all dead. Hence, there
- * is no need to inherit the fph state from the parent to the
- * child and all we have to do is to make sure that
- * IA64_THREAD_FPH_VALID is cleared in the child.
- *
- * XXX We could push this optimization a bit further by
- * clearing IA64_THREAD_FPH_VALID on ANY system call.
- * However, it's not clear this is worth doing. Also, it
- * would be a slight deviation from the normal Linux system
- * call behavior where scratch registers are preserved across
- * system calls (unless used by the system call itself).
- */
-# define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
- | IA64_THREAD_PM_VALID)
-# define THREAD_FLAGS_TO_SET 0
- p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
- | THREAD_FLAGS_TO_SET);
-
- ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
-
- if (unlikely(args->fn)) {
- if (unlikely(args->idle)) {
- /* fork_idle() called us */
- return 0;
- }
- memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
- child_stack->r4 = (unsigned long) args->fn;
- child_stack->r5 = (unsigned long) args->fn_arg;
- /*
- * Preserve PSR bits, except for bits 32-34 and 37-45,
- * which we can't read.
- */
- child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
- /* mark as valid, empty frame */
- child_ptregs->cr_ifs = 1UL << 63;
- child_stack->ar_fpsr = child_ptregs->ar_fpsr
- = ia64_getreg(_IA64_REG_AR_FPSR);
- child_stack->pr = (1 << PRED_KERNEL_STACK);
- child_stack->ar_bspstore = child_rbs;
- child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
-
-	/* Stop some PSR bits from being inherited.
-	 * The psr.up/psr.pp bits must be cleared on fork but inherited on execve();
-	 * therefore we must specify them explicitly here and not include them in
- * IA64_PSR_BITS_TO_CLEAR.
- */
- child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
- & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
- return 0;
- }
- stack = ((struct switch_stack *) regs) - 1;
- /* copy parent's switch_stack & pt_regs to child: */
- memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
-
- /* copy the parent's register backing store to the child: */
- rbs_size = stack->ar_bspstore - rbs;
- memcpy((void *) child_rbs, (void *) rbs, rbs_size);
- if (clone_flags & CLONE_SETTLS)
- child_ptregs->r13 = tls;
- if (user_stack_base) {
- child_ptregs->r12 = user_stack_base + user_stack_size - 16;
- child_ptregs->ar_bspstore = user_stack_base;
- child_ptregs->ar_rnat = 0;
- child_ptregs->loadrs = 0;
- }
- child_stack->ar_bspstore = child_rbs + rbs_size;
- child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
-
-	/* Stop some PSR bits from being inherited.
-	 * The psr.up/psr.pp bits must be cleared on fork but inherited on execve();
-	 * therefore we must specify them explicitly here and not include them in
- * IA64_PSR_BITS_TO_CLEAR.
- */
- child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
- & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
- return retval;
-}
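
The unat-congruence argument in the comment above copy_thread() is easy to check numerically: spilling a register to address X records its NaT bit at position N = (X & 0x1ff)/8 of ar.unat, so the parent's and child's bit positions agree whenever the two pt_regs areas are congruent modulo 512. A tiny illustration (the addresses are hypothetical; both are page aligned, as in the kernel):

#include <assert.h>
#include <stdint.h>

/* ar.unat bit used when spilling a register to address x */
static unsigned unat_pos(uintptr_t x)
{
	return (x & 0x1ff) / 8;
}

int main(void)
{
	uintptr_t parent_pt = 0x4000f000;	/* hypothetical pt_regs addresses; */
	uintptr_t child_pt  = 0x7fff3000;	/* page-aligned => congruent mod 512 */

	for (unsigned off = 0; off < 0x200; off += 8)
		assert(unat_pos(parent_pt + off) == unat_pos(child_pt + off));
	return 0;
}
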
-
-asmlinkage long ia64_clone(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stack_size, unsigned long parent_tidptr,
- unsigned long child_tidptr, unsigned long tls)
-{
- struct kernel_clone_args args = {
- .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
- .pidfd = (int __user *)parent_tidptr,
- .child_tid = (int __user *)child_tidptr,
- .parent_tid = (int __user *)parent_tidptr,
- .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
- .stack = stack_start,
- .stack_size = stack_size,
- .tls = tls,
- };
-
- return kernel_clone(&args);
-}
-
-static void
-do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
-{
- unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
- unsigned long ip;
- elf_greg_t *dst = arg;
- struct pt_regs *pt;
- char nat;
- int i;
-
- memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- unw_get_sp(info, &sp);
- pt = (struct pt_regs *) (sp + 16);
-
- urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);
-
- if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
- return;
-
- ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
- &ar_rnat);
-
- /*
- * coredump format:
- * r0-r31
- * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
- * predicate registers (p0-p63)
- * b0-b7
- * ip cfm user-mask
- * ar.rsc ar.bsp ar.bspstore ar.rnat
- * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
- */
-
- /* r0 is zero */
- for (i = 1, mask = (1UL << i); i < 32; ++i) {
- unw_get_gr(info, i, &dst[i], &nat);
- if (nat)
- nat_bits |= mask;
- mask <<= 1;
- }
- dst[32] = nat_bits;
- unw_get_pr(info, &dst[33]);
-
- for (i = 0; i < 8; ++i)
- unw_get_br(info, i, &dst[34 + i]);
-
- unw_get_rp(info, &ip);
- dst[42] = ip + ia64_psr(pt)->ri;
- dst[43] = cfm;
- dst[44] = pt->cr_ipsr & IA64_PSR_UM;
-
- unw_get_ar(info, UNW_AR_RSC, &dst[45]);
- /*
- * For bsp and bspstore, unw_get_ar() would return the kernel
- * addresses, but we need the user-level addresses instead:
- */
- dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */
- dst[47] = pt->ar_bspstore;
- dst[48] = ar_rnat;
- unw_get_ar(info, UNW_AR_CCV, &dst[49]);
- unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
- unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
- dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
- unw_get_ar(info, UNW_AR_LC, &dst[53]);
- unw_get_ar(info, UNW_AR_EC, &dst[54]);
- unw_get_ar(info, UNW_AR_CSD, &dst[55]);
- unw_get_ar(info, UNW_AR_SSD, &dst[56]);
-}
-
-static void
-do_copy_regs (struct unw_frame_info *info, void *arg)
-{
- do_copy_task_regs(current, info, arg);
-}
-
-void
-ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
-{
- unw_init_running(do_copy_regs, dst);
-}
-
-/*
- * Flush thread state. This is called when a thread does an execve().
- */
-void
-flush_thread (void)
-{
- /* drop floating-point and debug-register state if it exists: */
- current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
- ia64_drop_fpu(current);
-}
-
-/*
- * Clean up state associated with a thread. This is called when
- * the thread calls exit().
- */
-void
-exit_thread (struct task_struct *tsk)
-{
-
- ia64_drop_fpu(tsk);
-}
-
-unsigned long
-__get_wchan (struct task_struct *p)
-{
- struct unw_frame_info info;
- unsigned long ip;
- int count = 0;
-
- /*
- * Note: p may not be a blocked task (it could be current or
-	 * another process running on some other CPU). Rather than
- * trying to determine if p is really blocked, we just assume
- * it's blocked and rely on the unwind routines to fail
- * gracefully if the process wasn't really blocked after all.
- * --davidm 99/12/15
- */
- unw_init_from_blocked_task(&info, p);
- do {
- if (task_is_running(p))
- return 0;
- if (unw_unwind(&info) < 0)
- return 0;
- unw_get_ip(&info, &ip);
- if (!in_sched_functions(ip))
- return ip;
- } while (count++ < 16);
- return 0;
-}
-
-void
-cpu_halt (void)
-{
- pal_power_mgmt_info_u_t power_info[8];
- unsigned long min_power;
- int i, min_power_state;
-
- if (ia64_pal_halt_info(power_info) != 0)
- return;
-
- min_power_state = 0;
- min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
- for (i = 1; i < 8; ++i)
- if (power_info[i].pal_power_mgmt_info_s.im
- && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
- min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
- min_power_state = i;
- }
-
- while (1)
- ia64_pal_halt(min_power_state);
-}
-
-void machine_shutdown(void)
-{
- smp_shutdown_nonboot_cpus(reboot_cpu);
-
-#ifdef CONFIG_KEXEC
- kexec_disable_iosapic();
-#endif
-}
-
-void
-machine_restart (char *restart_cmd)
-{
- (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
- efi_reboot(REBOOT_WARM, NULL);
-}
-
-void
-machine_halt (void)
-{
- (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
- cpu_halt();
-}
-
-void
-machine_power_off (void)
-{
- do_kernel_power_off();
- machine_halt();
-}
-
-EXPORT_SYMBOL(ia64_delay_loop);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
deleted file mode 100644
index 4c41912c55..0000000000
--- a/arch/ia64/kernel/ptrace.c
+++ /dev/null
@@ -1,2012 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel support for the ptrace() and syscall tracing interfaces.
- *
- * Copyright (C) 1999-2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2006 Intel Co
- * 2006-08-12 - IA64 Native Utrace implementation support added by
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *
- * Derived from the x86 and Alpha versions.
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/audit.h>
-#include <linux/signal.h>
-#include <linux/regset.h>
-#include <linux/elf.h>
-#include <linux/resume_user_mode.h>
-
-#include <asm/processor.h>
-#include <asm/ptrace_offsets.h>
-#include <asm/rse.h>
-#include <linux/uaccess.h>
-#include <asm/unwind.h>
-
-#include "entry.h"
-
-/*
- * Bits in the PSR that we allow ptrace() to change:
- * be, up, ac, mfl, mfh (the user mask; five bits total)
- * db (debug breakpoint fault; one bit)
- * id (instruction debug fault disable; one bit)
- * dd (data debug fault disable; one bit)
- * ri (restart instruction; two bits)
- * is (instruction set; one bit)
- */
-#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
- | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
-
-#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
-#define PFM_MASK MASK(38)
-
-#define PTRACE_DEBUG 0
-
-#if PTRACE_DEBUG
-# define dprintk(format...) printk(format)
-# define inline
-#else
-# define dprintk(format...)
-#endif
-
-/* Return TRUE if PT was created due to kernel-entry via a system-call. */
-
-static inline int
-in_syscall (struct pt_regs *pt)
-{
- return (long) pt->cr_ifs >= 0;
-}
-
-/*
- * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
- * bitset where bit i is set iff the NaT bit of register i is set.
- */
-unsigned long
-ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
-{
-# define GET_BITS(first, last, unat) \
- ({ \
- unsigned long bit = ia64_unat_pos(&pt->r##first); \
- unsigned long nbits = (last - first + 1); \
- unsigned long mask = MASK(nbits) << first; \
- unsigned long dist; \
- if (bit < first) \
- dist = 64 + bit - first; \
- else \
- dist = bit - first; \
- ia64_rotr(unat, dist) & mask; \
- })
- unsigned long val;
-
- /*
- * Registers that are stored consecutively in struct pt_regs
- * can be handled in parallel. If the register order in
-	 * struct pt_regs changes, this code MUST be updated.
- */
- val = GET_BITS( 1, 1, scratch_unat);
- val |= GET_BITS( 2, 3, scratch_unat);
- val |= GET_BITS(12, 13, scratch_unat);
- val |= GET_BITS(14, 14, scratch_unat);
- val |= GET_BITS(15, 15, scratch_unat);
- val |= GET_BITS( 8, 11, scratch_unat);
- val |= GET_BITS(16, 31, scratch_unat);
- return val;
-
-# undef GET_BITS
-}
-
-/*
- * Set the NaT bits for the scratch registers according to NAT and
- * return the resulting unat (assuming the scratch registers are
- * stored in PT).
- */
-unsigned long
-ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
-{
-# define PUT_BITS(first, last, nat) \
- ({ \
- unsigned long bit = ia64_unat_pos(&pt->r##first); \
- unsigned long nbits = (last - first + 1); \
- unsigned long mask = MASK(nbits) << first; \
- long dist; \
- if (bit < first) \
- dist = 64 + bit - first; \
- else \
- dist = bit - first; \
- ia64_rotl(nat & mask, dist); \
- })
- unsigned long scratch_unat;
-
- /*
- * Registers that are stored consecutively in struct pt_regs
- * can be handled in parallel. If the register order in
-	 * struct pt_regs changes, this code MUST be updated.
- */
- scratch_unat = PUT_BITS( 1, 1, nat);
- scratch_unat |= PUT_BITS( 2, 3, nat);
- scratch_unat |= PUT_BITS(12, 13, nat);
- scratch_unat |= PUT_BITS(14, 14, nat);
- scratch_unat |= PUT_BITS(15, 15, nat);
- scratch_unat |= PUT_BITS( 8, 11, nat);
- scratch_unat |= PUT_BITS(16, 31, nat);
-
- return scratch_unat;
-
-# undef PUT_BITS
-}
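
GET_BITS()/PUT_BITS() above rely on rotation being exactly invertible: rotating ar.unat right by dist = (bit - first) mod 64 moves the NaT bit stored at position bit down to position first, and rotating left by the same distance undoes it. A host-side illustration with plain 64-bit rotates (the positions chosen are hypothetical):

#include <assert.h>
#include <stdint.h>

static uint64_t rotr64(uint64_t x, unsigned n)
{
	n &= 63;
	return n ? (x >> n) | (x << (64 - n)) : x;
}

static uint64_t rotl64(uint64_t x, unsigned n)
{
	n &= 63;
	return n ? (x << n) | (x >> (64 - n)) : x;
}

int main(void)
{
	unsigned bit = 37, first = 8;	/* hypothetical unat position / regnum */
	unsigned dist = bit >= first ? bit - first : 64 + bit - first;
	uint64_t unat = 1ULL << bit;

	/* GET_BITS: the NaT bit lands at position `first` */
	assert(rotr64(unat, dist) & (1ULL << first));
	/* PUT_BITS is the inverse rotation */
	assert(rotl64(rotr64(unat, dist), dist) == unat);
	return 0;
}
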
-
-#define IA64_MLX_TEMPLATE 0x2
-#define IA64_MOVL_OPCODE 6
-
-void
-ia64_increment_ip (struct pt_regs *regs)
-{
- unsigned long w0, ri = ia64_psr(regs)->ri + 1;
-
- if (ri > 2) {
- ri = 0;
- regs->cr_iip += 16;
- } else if (ri == 2) {
- get_user(w0, (char __user *) regs->cr_iip + 0);
- if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
- /*
- * rfi'ing to slot 2 of an MLX bundle causes
- * an illegal operation fault. We don't want
- * that to happen...
- */
- ri = 0;
- regs->cr_iip += 16;
- }
- }
- ia64_psr(regs)->ri = ri;
-}
-
-void
-ia64_decrement_ip (struct pt_regs *regs)
-{
- unsigned long w0, ri = ia64_psr(regs)->ri - 1;
-
- if (ia64_psr(regs)->ri == 0) {
- regs->cr_iip -= 16;
- ri = 2;
- get_user(w0, (char __user *) regs->cr_iip + 0);
- if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
- /*
- * rfi'ing to slot 2 of an MLX bundle causes
- * an illegal operation fault. We don't want
- * that to happen...
- */
- ri = 1;
- }
- }
- ia64_psr(regs)->ri = ri;
-}
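
Both routines above step psr.ri through the three 41-bit slots of a 16-byte bundle, with one irregularity: slot 2 of an MLX bundle (template 0x2) is not executable, because the movl's long immediate occupies slots 1 and 2, so the instruction pointer must skip straight to the next bundle. A host-side model of the forward step (the template check is faked with a flag; the kernel reads the bundle with get_user()):

#include <assert.h>
#include <stdint.h>

struct ia64_ip { uint64_t iip; unsigned ri; };	/* bundle address + slot */

/* advance one slot; is_mlx models the template check on the bundle at iip */
static void increment_ip(struct ia64_ip *ip, int is_mlx)
{
	unsigned ri = ip->ri + 1;

	if (ri > 2 || (ri == 2 && is_mlx)) {	/* MLX has no executable slot 2 */
		ri = 0;
		ip->iip += 16;
	}
	ip->ri = ri;
}

int main(void)
{
	struct ia64_ip ip = { 0x1000, 0 };

	increment_ip(&ip, 0);			/* ordinary bundle: slot 0 -> 1 */
	assert(ip.iip == 0x1000 && ip.ri == 1);
	increment_ip(&ip, 1);			/* MLX bundle: skip slot 2 entirely */
	assert(ip.iip == 0x1010 && ip.ri == 0);
	return 0;
}
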
-
-/*
- * This routine is used to read the RNaT bits that are stored on the
- * kernel backing store. Since, in general, the alignment of the user
- * and kernel backing stores differs, this is not completely trivial. In
- * essence, we need to construct the user RNAT based on up to two
- * kernel RNAT values and/or the RNAT value saved in the child's
- * pt_regs.
- *
- * user rbs
- *
- * +--------+ <-- lowest address
- * | slot62 |
- * +--------+
- * | rnat | 0x....1f8
- * +--------+
- * | slot00 | \
- * +--------+ |
- * | slot01 | > child_regs->ar_rnat
- * +--------+ |
- * | slot02 | / kernel rbs
- * +--------+ +--------+
- * <- child_regs->ar_bspstore | slot61 | <-- krbs
- * +- - - - + +--------+
- * | slot62 |
- * +- - - - + +--------+
- * | rnat |
- * +- - - - + +--------+
- * vrnat | slot00 |
- * +- - - - + +--------+
- * = =
- * +--------+
- * | slot00 | \
- * +--------+ |
- * | slot01 | > child_stack->ar_rnat
- * +--------+ |
- * | slot02 | /
- * +--------+
- * <--- child_stack->ar_bspstore
- *
- * The way to think of this code is as follows: bit 0 in the user rnat
- * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
- * values. The kernel rnat value holding this bit is stored in
- * variable rnat0. rnat1 is loaded with the kernel rnat value that
- * forms the upper bits of the user rnat value.
- *
- * Boundary cases:
- *
- * o when reading the rnat "below" the first rnat slot on the kernel
- * backing store, rnat0/rnat1 are set to 0 and the low order bits are
- * merged in from pt->ar_rnat.
- *
- * o when reading the rnat "above" the last rnat slot on the kernel
- * backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
- */
-static unsigned long
-get_rnat (struct task_struct *task, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr,
- unsigned long *urbs_end)
-{
- unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
- unsigned long umask = 0, mask, m;
- unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
- long num_regs, nbits;
- struct pt_regs *pt;
-
- pt = task_pt_regs(task);
- kbsp = (unsigned long *) sw->ar_bspstore;
- ubspstore = (unsigned long *) pt->ar_bspstore;
-
- if (urbs_end < urnat_addr)
- nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
- else
- nbits = 63;
- mask = MASK(nbits);
- /*
- * First, figure out which bit number slot 0 in user-land maps
- * to in the kernel rnat. Do this by figuring out how many
- * register slots we're beyond the user's backingstore and
- * then computing the equivalent address in kernel space.
- */
- num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
- slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
- shift = ia64_rse_slot_num(slot0_kaddr);
- rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
- rnat0_kaddr = rnat1_kaddr - 64;
-
- if (ubspstore + 63 > urnat_addr) {
- /* some bits need to be merged in from pt->ar_rnat */
- umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
- urnat = (pt->ar_rnat & umask);
- mask &= ~umask;
- if (!mask)
- return urnat;
- }
-
- m = mask << shift;
- if (rnat0_kaddr >= kbsp)
- rnat0 = sw->ar_rnat;
- else if (rnat0_kaddr > krbs)
- rnat0 = *rnat0_kaddr;
- urnat |= (rnat0 & m) >> shift;
-
- m = mask >> (63 - shift);
- if (rnat1_kaddr >= kbsp)
- rnat1 = sw->ar_rnat;
- else if (rnat1_kaddr > krbs)
- rnat1 = *rnat1_kaddr;
- urnat |= (rnat1 & m) << (63 - shift);
- return urnat;
-}
-
-/*
- * The reverse of get_rnat.
- */
-static void
-put_rnat (struct task_struct *task, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
- unsigned long *urbs_end)
-{
- unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
- unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
- long num_regs, nbits;
- struct pt_regs *pt;
- unsigned long cfm, *urbs_kargs;
-
- pt = task_pt_regs(task);
- kbsp = (unsigned long *) sw->ar_bspstore;
- ubspstore = (unsigned long *) pt->ar_bspstore;
-
- urbs_kargs = urbs_end;
- if (in_syscall(pt)) {
- /*
- * If entered via syscall, don't allow user to set rnat bits
- * for syscall args.
- */
- cfm = pt->cr_ifs;
- urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
- }
-
- if (urbs_kargs >= urnat_addr)
- nbits = 63;
- else {
- if ((urnat_addr - 63) >= urbs_kargs)
- return;
- nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
- }
- mask = MASK(nbits);
-
- /*
- * First, figure out which bit number slot 0 in user-land maps
- * to in the kernel rnat. Do this by figuring out how many
- * register slots we're beyond the user's backingstore and
- * then computing the equivalent address in kernel space.
- */
- num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
- slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
- shift = ia64_rse_slot_num(slot0_kaddr);
- rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
- rnat0_kaddr = rnat1_kaddr - 64;
-
- if (ubspstore + 63 > urnat_addr) {
-		/* some bits need to be placed in pt->ar_rnat: */
- umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
- pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
- mask &= ~umask;
- if (!mask)
- return;
- }
- /*
- * Note: Section 11.1 of the EAS guarantees that bit 63 of an
-	 * rnat slot is ignored, so we don't have to clear it here.
- */
- rnat0 = (urnat << shift);
- m = mask << shift;
- if (rnat0_kaddr >= kbsp)
- sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
- else if (rnat0_kaddr > krbs)
- *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
-
- rnat1 = (urnat >> (63 - shift));
- m = mask >> (63 - shift);
- if (rnat1_kaddr >= kbsp)
- sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
- else if (rnat1_kaddr > krbs)
- *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
-}
-
-static inline int
-on_kernel_rbs (unsigned long addr, unsigned long bspstore,
- unsigned long urbs_end)
-{
- unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
- urbs_end);
- return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
-}
-
-/*
- * Read a word from the user-level backing store of task CHILD. ADDR
- * is the user-level address to read the word from, VAL a pointer to
- * the return value, and USER_BSP gives the end of the user-level
- * backing store (i.e., it's the address that would be in ar.bsp after
- * the user executed a "cover" instruction).
- *
- * This routine takes care of accessing the kernel register backing
- * store for those registers that got spilled there. It also takes
- * care of calculating the appropriate RNaT collection words.
- */
-long
-ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
- unsigned long user_rbs_end, unsigned long addr, long *val)
-{
- unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
- struct pt_regs *child_regs;
- size_t copied;
- long ret;
-
- urbs_end = (long *) user_rbs_end;
- laddr = (unsigned long *) addr;
- child_regs = task_pt_regs(child);
- bspstore = (unsigned long *) child_regs->ar_bspstore;
- krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- if (on_kernel_rbs(addr, (unsigned long) bspstore,
- (unsigned long) urbs_end))
- {
- /*
- * Attempt to read the RBS in an area that's actually
- * on the kernel RBS => read the corresponding bits in
- * the kernel RBS.
- */
- rnat_addr = ia64_rse_rnat_addr(laddr);
- ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
-
- if (laddr == rnat_addr) {
- /* return NaT collection word itself */
- *val = ret;
- return 0;
- }
-
- if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
- /*
- * It is implementation dependent whether the
- * data portion of a NaT value gets saved on a
- * st8.spill or RSE spill (e.g., see EAS 2.6,
- * 4.4.4.6 Register Spill and Fill). To get
- * consistent behavior across all possible
- * IA-64 implementations, we return zero in
- * this case.
- */
- *val = 0;
- return 0;
- }
-
- if (laddr < urbs_end) {
- /*
- * The desired word is on the kernel RBS and
- * is not a NaT.
- */
- regnum = ia64_rse_num_regs(bspstore, laddr);
- *val = *ia64_rse_skip_regs(krbs, regnum);
- return 0;
- }
- }
- copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
- if (copied != sizeof(ret))
- return -EIO;
- *val = ret;
- return 0;
-}
-
-long
-ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
- unsigned long user_rbs_end, unsigned long addr, long val)
-{
- unsigned long *bspstore, *krbs, regnum, *laddr;
- unsigned long *urbs_end = (long *) user_rbs_end;
- struct pt_regs *child_regs;
-
- laddr = (unsigned long *) addr;
- child_regs = task_pt_regs(child);
- bspstore = (unsigned long *) child_regs->ar_bspstore;
- krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- if (on_kernel_rbs(addr, (unsigned long) bspstore,
- (unsigned long) urbs_end))
- {
- /*
- * Attempt to write the RBS in an area that's actually
- * on the kernel RBS => write the corresponding bits
- * in the kernel RBS.
- */
- if (ia64_rse_is_rnat_slot(laddr))
- put_rnat(child, child_stack, krbs, laddr, val,
- urbs_end);
- else {
- if (laddr < urbs_end) {
- regnum = ia64_rse_num_regs(bspstore, laddr);
- *ia64_rse_skip_regs(krbs, regnum) = val;
- }
- }
- } else if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE | FOLL_WRITE)
- != sizeof(val))
- return -EIO;
- return 0;
-}
-
-/*
- * Calculate the address of the end of the user-level register backing
- * store. This is the address that would have been stored in ar.bsp
- * if the user had executed a "cover" instruction right before
- * entering the kernel. If CFMP is not NULL, it is used to return the
- * "current frame mask" that was active at the time the kernel was
- * entered.
- */
-unsigned long
-ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
- unsigned long *cfmp)
-{
- unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
- long ndirty;
-
- krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- bspstore = (unsigned long *) pt->ar_bspstore;
- ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
-
- if (in_syscall(pt))
- ndirty += (cfm & 0x7f);
- else
- cfm &= ~(1UL << 63); /* clear valid bit */
-
- if (cfmp)
- *cfmp = cfm;
- return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
-}
-
-/*
- * Synchronize (i.e., write) the RSE backing store living in kernel
- * space to the VM of the CHILD task. SW and PT are the pointers to
- * the switch_stack and pt_regs structures, respectively.
- * USER_RBS_END is the user-level address at which the backing store
- * ends.
- */
-long
-ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
- unsigned long user_rbs_start, unsigned long user_rbs_end)
-{
- unsigned long addr, val;
- long ret;
-
- /* now copy word for word from kernel rbs to user rbs: */
- for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
- ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
- if (ret < 0)
- return ret;
- if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE | FOLL_WRITE)
- != sizeof(val))
- return -EIO;
- }
- return 0;
-}
-
-static long
-ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
- unsigned long user_rbs_start, unsigned long user_rbs_end)
-{
- unsigned long addr, val;
- long ret;
-
- /* now copy word for word from user rbs to kernel rbs: */
- for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
- if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE)
- != sizeof(val))
- return -EIO;
-
- ret = ia64_poke(child, sw, user_rbs_end, addr, val);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
-
-typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
- unsigned long, unsigned long);
-
-static void do_sync_rbs(struct unw_frame_info *info, void *arg)
-{
- struct pt_regs *pt;
- unsigned long urbs_end;
- syncfunc_t fn = arg;
-
- if (unw_unwind_to_user(info) < 0)
- return;
- pt = task_pt_regs(info->task);
- urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
-
- fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
-}
-
-/*
- * When a thread is stopped (ptraced), the debugger might change the thread's
- * user stack (by writing its memory directly), and we must not let the RSE
- * state stored in the kernel override the user stack (user space's RSE state
- * is newer than the kernel's in that case). To work around this, we copy the
- * kernel RSE state to the user RSE before the task is stopped, so the user RSE
- * has up-to-date data. We then copy the user RSE back to the kernel after the
- * task is resumed from the traced stop, and the kernel uses the newer RSE
- * state to return to user mode. TIF_RESTORE_RSE indicates that the user RSE
- * still needs to be synchronized back to the kernel.
- */
-void ia64_ptrace_stop(void)
-{
- if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
- return;
- set_notify_resume(current);
- unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
-}
-
-/*
- * This is called to read back the register backing store.
- */
-void ia64_sync_krbs(void)
-{
- clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
-
- unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
-}
-
-/*
- * Write f32-f127 back to task->thread.fph if it has been modified.
- */
-inline void
-ia64_flush_fph (struct task_struct *task)
-{
- struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
-
- /*
- * Prevent migrating this task while
- * we're fiddling with the FPU state
- */
- preempt_disable();
- if (ia64_is_local_fpu_owner(task) && psr->mfh) {
- psr->mfh = 0;
- task->thread.flags |= IA64_THREAD_FPH_VALID;
- ia64_save_fpu(&task->thread.fph[0]);
- }
- preempt_enable();
-}
-
-/*
- * Sync the fph state of the task so that it can be manipulated
- * through thread.fph. If necessary, f32-f127 are written back to
- * thread.fph or, if the fph state hasn't been used before, thread.fph
- * is cleared to zeroes. Also, access to f32-f127 is disabled to
- * ensure that the task picks up the state from thread.fph when it
- * executes again.
- */
-void
-ia64_sync_fph (struct task_struct *task)
-{
- struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
-
- ia64_flush_fph(task);
- if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
- task->thread.flags |= IA64_THREAD_FPH_VALID;
- memset(&task->thread.fph, 0, sizeof(task->thread.fph));
- }
- ia64_drop_fpu(task);
- psr->dfh = 1;
-}
-
-/*
- * Change the machine-state of CHILD such that it will return via the normal
- * kernel exit-path, rather than the syscall-exit path.
- */
-static void
-convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
- unsigned long cfm)
-{
- struct unw_frame_info info, prev_info;
- unsigned long ip, sp, pr;
-
- unw_init_from_blocked_task(&info, child);
- while (1) {
- prev_info = info;
- if (unw_unwind(&info) < 0)
- return;
-
- unw_get_sp(&info, &sp);
- if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
- < IA64_PT_REGS_SIZE) {
- dprintk("ptrace.%s: ran off the top of the kernel "
- "stack\n", __func__);
- return;
- }
- if (unw_get_pr (&prev_info, &pr) < 0) {
- unw_get_rp(&prev_info, &ip);
- dprintk("ptrace.%s: failed to read "
- "predicate register (ip=0x%lx)\n",
- __func__, ip);
- return;
- }
- if (unw_is_intr_frame(&info)
- && (pr & (1UL << PRED_USER_STACK)))
- break;
- }
-
- /*
- * Note: at the time of this call, the target task is blocked
-	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
- * (aka, "pLvSys") we redirect execution from
- * .work_pending_syscall_end to .work_processed_kernel.
- */
- unw_get_pr(&prev_info, &pr);
- pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
- pr |= (1UL << PRED_NON_SYSCALL);
- unw_set_pr(&prev_info, pr);
-
- pt->cr_ifs = (1UL << 63) | cfm;
- /*
- * Clear the memory that is NOT written on syscall-entry to
- * ensure we do not leak kernel-state to user when execution
- * resumes.
- */
- pt->r2 = 0;
- pt->r3 = 0;
- pt->r14 = 0;
- memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
- memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
- pt->b7 = 0;
- pt->ar_ccv = 0;
- pt->ar_csd = 0;
- pt->ar_ssd = 0;
-}
-
-static int
-access_nat_bits (struct task_struct *child, struct pt_regs *pt,
- struct unw_frame_info *info,
- unsigned long *data, int write_access)
-{
- unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
- char nat = 0;
-
- if (write_access) {
- nat_bits = *data;
- scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
- if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
- dprintk("ptrace: failed to set ar.unat\n");
- return -1;
- }
- for (regnum = 4; regnum <= 7; ++regnum) {
- unw_get_gr(info, regnum, &dummy, &nat);
- unw_set_gr(info, regnum, dummy,
- (nat_bits >> regnum) & 1);
- }
- } else {
- if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
- dprintk("ptrace: failed to read ar.unat\n");
- return -1;
- }
- nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
- for (regnum = 4; regnum <= 7; ++regnum) {
- unw_get_gr(info, regnum, &dummy, &nat);
- nat_bits |= (nat != 0) << regnum;
- }
- *data = nat_bits;
- }
- return 0;
-}
-
-static int
-access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
- unsigned long addr, unsigned long *data, int write_access);
-
-static long
-ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
-{
- unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
- struct unw_frame_info info;
- struct ia64_fpreg fpval;
- struct switch_stack *sw;
- struct pt_regs *pt;
- long ret, retval = 0;
- char nat = 0;
- int i;
-
- if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
- return -EIO;
-
- pt = task_pt_regs(child);
- sw = (struct switch_stack *) (child->thread.ksp + 16);
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0) {
- return -EIO;
- }
-
- if (((unsigned long) ppr & 0x7) != 0) {
- dprintk("ptrace:unaligned register address %p\n", ppr);
- return -EIO;
- }
-
- if (access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 0) < 0 ||
- access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 0) < 0 ||
- access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 0) < 0 ||
- access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 0) < 0 ||
- access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 0) < 0 ||
- access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 0) < 0 ||
- access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 0) < 0)
- return -EIO;
-
- /* control regs */
-
- retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
- retval |= __put_user(psr, &ppr->cr_ipsr);
-
- /* app regs */
-
- retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
- retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
- retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
- retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
- retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
- retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
-
- retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
- retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
- retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
- retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
- retval |= __put_user(cfm, &ppr->cfm);
-
- /* gr1-gr3 */
-
- retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
- retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
-
- /* gr4-gr7 */
-
- for (i = 4; i < 8; i++) {
- if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
- return -EIO;
- retval |= __put_user(val, &ppr->gr[i]);
- }
-
- /* gr8-gr11 */
-
- retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
-
- /* gr12-gr15 */
-
- retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
- retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
- retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
-
- /* gr16-gr31 */
-
- retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
-
- /* b0 */
-
- retval |= __put_user(pt->b0, &ppr->br[0]);
-
- /* b1-b5 */
-
- for (i = 1; i < 6; i++) {
- if (unw_access_br(&info, i, &val, 0) < 0)
- return -EIO;
-		retval |= __put_user(val, &ppr->br[i]);
- }
-
- /* b6-b7 */
-
- retval |= __put_user(pt->b6, &ppr->br[6]);
- retval |= __put_user(pt->b7, &ppr->br[7]);
-
- /* fr2-fr5 */
-
- for (i = 2; i < 6; i++) {
- if (unw_get_fr(&info, i, &fpval) < 0)
- return -EIO;
- retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
- }
-
- /* fr6-fr11 */
-
- retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
- sizeof(struct ia64_fpreg) * 6);
-
- /* fp scratch regs(12-15) */
-
- retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
- sizeof(struct ia64_fpreg) * 4);
-
- /* fr16-fr31 */
-
- for (i = 16; i < 32; i++) {
- if (unw_get_fr(&info, i, &fpval) < 0)
- return -EIO;
- retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
- }
-
- /* fph */
-
- ia64_flush_fph(child);
- retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
- sizeof(ppr->fr[32]) * 96);
-
- /* preds */
-
- retval |= __put_user(pt->pr, &ppr->pr);
-
- /* nat bits */
-
- retval |= __put_user(nat_bits, &ppr->nat);
-
- ret = retval ? -EIO : 0;
- return ret;
-}
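
ptrace_getregs() above (and ptrace_setregs() below) accumulate the result of every __put_user()/__get_user() with |= and collapse the sum to -EIO once at the end; since each primitive returns 0 or -EFAULT, a single final test replaces dozens of branches. A minimal sketch of the pattern (put_word() is a hypothetical stand-in for __put_user()):

#include <assert.h>
#include <errno.h>
#include <stddef.h>

/* hypothetical stand-in for __put_user(): 0 on success, -EFAULT on fault */
static int put_word(unsigned long val, unsigned long *uaddr)
{
	if (!uaddr)
		return -EFAULT;
	*uaddr = val;
	return 0;
}

static long copy_all(unsigned long *dst, const unsigned long *src, int n)
{
	long retval = 0;

	for (int i = 0; i < n; i++)
		retval |= put_word(src[i], dst ? &dst[i] : NULL);

	return retval ? -EIO : 0;	/* any single failure yields -EIO */
}

int main(void)
{
	unsigned long src[3] = { 1, 2, 3 }, dst[3];

	assert(copy_all(dst, src, 3) == 0);
	assert(copy_all(NULL, src, 3) == -EIO);
	return 0;
}
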
-
-static long
-ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
-{
- unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
- struct unw_frame_info info;
- struct switch_stack *sw;
- struct ia64_fpreg fpval;
- struct pt_regs *pt;
- long retval = 0;
- int i;
-
- memset(&fpval, 0, sizeof(fpval));
-
- if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
- return -EIO;
-
- pt = task_pt_regs(child);
- sw = (struct switch_stack *) (child->thread.ksp + 16);
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0) {
- return -EIO;
- }
-
- if (((unsigned long) ppr & 0x7) != 0) {
- dprintk("ptrace:unaligned register address %p\n", ppr);
- return -EIO;
- }
-
- /* control regs */
-
- retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
- retval |= __get_user(psr, &ppr->cr_ipsr);
-
- /* app regs */
-
- retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
- retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
- retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
- retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
- retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
- retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
-
- retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
- retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
- retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
- retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
- retval |= __get_user(cfm, &ppr->cfm);
-
- /* gr1-gr3 */
-
- retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
- retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
-
- /* gr4-gr7 */
-
- for (i = 4; i < 8; i++) {
- retval |= __get_user(val, &ppr->gr[i]);
- /* NaT bit will be set via PT_NAT_BITS: */
- if (unw_set_gr(&info, i, val, 0) < 0)
- return -EIO;
- }
-
- /* gr8-gr11 */
-
- retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
-
- /* gr12-gr15 */
-
- retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
- retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
- retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
-
- /* gr16-gr31 */
-
- retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
-
- /* b0 */
-
- retval |= __get_user(pt->b0, &ppr->br[0]);
-
- /* b1-b5 */
-
- for (i = 1; i < 6; i++) {
- retval |= __get_user(val, &ppr->br[i]);
- unw_set_br(&info, i, val);
- }
-
- /* b6-b7 */
-
- retval |= __get_user(pt->b6, &ppr->br[6]);
- retval |= __get_user(pt->b7, &ppr->br[7]);
-
- /* fr2-fr5 */
-
- for (i = 2; i < 6; i++) {
- retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
- if (unw_set_fr(&info, i, fpval) < 0)
- return -EIO;
- }
-
- /* fr6-fr11 */
-
- retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
- sizeof(ppr->fr[6]) * 6);
-
- /* fp scratch regs(12-15) */
-
- retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
- sizeof(ppr->fr[12]) * 4);
-
- /* fr16-fr31 */
-
- for (i = 16; i < 32; i++) {
- retval |= __copy_from_user(&fpval, &ppr->fr[i],
- sizeof(fpval));
- if (unw_set_fr(&info, i, fpval) < 0)
- return -EIO;
- }
-
- /* fph */
-
- ia64_sync_fph(child);
- retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
- sizeof(ppr->fr[32]) * 96);
-
- /* preds */
-
- retval |= __get_user(pt->pr, &ppr->pr);
-
- /* nat bits */
-
- retval |= __get_user(nat_bits, &ppr->nat);
-
- retval |= access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 1);
- retval |= access_elf_reg(child, &info, ELF_AR_RSC_OFFSET, &rsc, 1);
- retval |= access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 1);
- retval |= access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 1);
- retval |= access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 1);
- retval |= access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 1);
- retval |= access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 1);
- retval |= access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 1);
-
- return retval ? -EIO : 0;
-}
-
-void
-user_enable_single_step (struct task_struct *child)
-{
- struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
-
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
- child_psr->ss = 1;
-}
-
-void
-user_enable_block_step (struct task_struct *child)
-{
- struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
-
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
- child_psr->tb = 1;
-}
-
-void
-user_disable_single_step (struct task_struct *child)
-{
- struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
-
- /* make sure the single step/taken-branch trap bits are not set: */
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
- child_psr->ss = 0;
- child_psr->tb = 0;
-}
-
-/*
- * Called by kernel/ptrace.c when detaching.
- *
- * Make sure the single step bit is not set.
- */
-void
-ptrace_disable (struct task_struct *child)
-{
- user_disable_single_step(child);
-}
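
These hooks are driven from user space through the generic ptrace(2)
interface. A minimal tracer-side sketch, assuming a child already
stopped under ptrace (pid is hypothetical, error handling omitted):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void step_once(pid_t pid)
	{
		int status;

		/* kernel side: user_enable_single_step() sets psr.ss */
		ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
		waitpid(pid, &status, 0);	/* child stops with SIGTRAP */
	}
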
-
-static int
-access_uarea (struct task_struct *child, unsigned long addr,
- unsigned long *data, int write_access);
-
-long
-arch_ptrace (struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- switch (request) {
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- /* read word at location addr */
- if (ptrace_access_vm(child, addr, &data, sizeof(data),
- FOLL_FORCE)
- != sizeof(data))
- return -EIO;
- /* ensure return value is not mistaken for error code */
- force_successful_syscall_return();
- return data;
-
- /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
- * by the generic ptrace_request().
- */
-
- case PTRACE_PEEKUSR:
- /* read the word at addr in the USER area */
- if (access_uarea(child, addr, &data, 0) < 0)
- return -EIO;
- /* ensure return value is not mistaken for error code */
- force_successful_syscall_return();
- return data;
-
- case PTRACE_POKEUSR:
- /* write the word at addr in the USER area */
- if (access_uarea(child, addr, &data, 1) < 0)
- return -EIO;
- return 0;
-
- case PTRACE_OLD_GETSIGINFO:
- /* for backwards-compatibility */
- return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
-
- case PTRACE_OLD_SETSIGINFO:
- /* for backwards-compatibility */
- return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
-
- case PTRACE_GETREGS:
- return ptrace_getregs(child,
- (struct pt_all_user_regs __user *) data);
-
- case PTRACE_SETREGS:
- return ptrace_setregs(child,
- (struct pt_all_user_regs __user *) data);
-
- default:
- return ptrace_request(child, request, addr, data);
- }
-}
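
PTRACE_PEEKUSR/PTRACE_POKEUSR take byte offsets into the user area; on
ia64 these are the PT_* constants from <asm/ptrace_offsets.h>. A
tracer-side sketch reading the stopped child's instruction pointer
(hypothetical pid, assuming the uapi header is available):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace_offsets.h>	/* PT_CR_IIP et al. */

	static long read_child_ip(pid_t pid)
	{
		long ip;

		errno = 0;
		ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
		if (ip == -1 && errno != 0)
			perror("PTRACE_PEEKUSER");	/* access_uarea() rejected it */
		return ip;
	}
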
-
-
-/* "asmlinkage" so the input arguments are preserved... */
-
-asmlinkage long
-syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
- long arg4, long arg5, long arg6, long arg7,
- struct pt_regs regs)
-{
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- if (ptrace_report_syscall_entry(&regs))
- return -ENOSYS;
-
- /* copy user rbs to kernel rbs */
- if (test_thread_flag(TIF_RESTORE_RSE))
- ia64_sync_krbs();
-
-
- audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);
-
- return 0;
-}
-
-/* "asmlinkage" so the input arguments are preserved... */
-
-asmlinkage void
-syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
- long arg4, long arg5, long arg6, long arg7,
- struct pt_regs regs)
-{
- int step;
-
- audit_syscall_exit(&regs);
-
- step = test_thread_flag(TIF_SINGLESTEP);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- ptrace_report_syscall_exit(&regs, step);
-
- /* copy user rbs to kernel rbs */
- if (test_thread_flag(TIF_RESTORE_RSE))
- ia64_sync_krbs();
-}
-
-/* Utrace implementation starts here */
-struct regset_get {
- void *kbuf;
- void __user *ubuf;
-};
-
-struct regset_set {
- const void *kbuf;
- const void __user *ubuf;
-};
-
-struct regset_getset {
- struct task_struct *target;
- const struct user_regset *regset;
- union {
- struct regset_get get;
- struct regset_set set;
- } u;
- unsigned int pos;
- unsigned int count;
- int ret;
-};
-
-static const ptrdiff_t pt_offsets[32] =
-{
-#define R(n) offsetof(struct pt_regs, r##n)
- [0] = -1, R(1), R(2), R(3),
- [4] = -1, [5] = -1, [6] = -1, [7] = -1,
- R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
- R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
- R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
-#undef R
-};
-
-static int
-access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
- unsigned long addr, unsigned long *data, int write_access)
-{
- struct pt_regs *pt = task_pt_regs(target);
- unsigned reg = addr / sizeof(unsigned long);
- ptrdiff_t d = pt_offsets[reg];
-
- if (d >= 0) {
- unsigned long *ptr = (void *)pt + d;
- if (write_access)
- *ptr = *data;
- else
- *data = *ptr;
- return 0;
- } else {
- char nat = 0;
- if (write_access) {
- /* read NaT bit first: */
- unsigned long dummy;
- int ret = unw_get_gr(info, reg, &dummy, &nat);
- if (ret < 0)
- return ret;
- }
- return unw_access_gr(info, reg, data, &nat, write_access);
- }
-}
-
-static int
-access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
- unsigned long addr, unsigned long *data, int write_access)
-{
- struct pt_regs *pt;
- unsigned long *ptr = NULL;
-
- pt = task_pt_regs(target);
- switch (addr) {
- case ELF_BR_OFFSET(0):
- ptr = &pt->b0;
- break;
- case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
- return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
- data, write_access);
- case ELF_BR_OFFSET(6):
- ptr = &pt->b6;
- break;
- case ELF_BR_OFFSET(7):
- ptr = &pt->b7;
- }
- if (write_access)
- *ptr = *data;
- else
- *data = *ptr;
- return 0;
-}
-
-static int
-access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
- unsigned long addr, unsigned long *data, int write_access)
-{
- struct pt_regs *pt;
- unsigned long cfm, urbs_end;
- unsigned long *ptr = NULL;
-
- pt = task_pt_regs(target);
- if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
- switch (addr) {
- case ELF_AR_RSC_OFFSET:
- /* force PL3 */
- if (write_access)
- pt->ar_rsc = *data | (3 << 2);
- else
- *data = pt->ar_rsc;
- return 0;
- case ELF_AR_BSP_OFFSET:
- /*
- * By convention, we use PT_AR_BSP to refer to
- * the end of the user-level backing store.
- * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
- * to get the real value of ar.bsp at the time
- * the kernel was entered.
- *
- * Furthermore, when changing the contents of
- * PT_AR_BSP (or PT_CFM) while the task is
- * blocked in a system call, convert the state
- * so that the non-system-call exit
- * path is used. This ensures that the proper
- * state will be picked up when resuming
- * execution. However, it *also* means that
- * once we write PT_AR_BSP/PT_CFM, it won't be
- * possible to modify the syscall arguments of
- * the pending system call any longer. This
- * shouldn't be an issue because modifying
- * PT_AR_BSP/PT_CFM generally implies that
- * we're either abandoning the pending system
-			 * call or that we defer its re-execution
- * (e.g., due to GDB doing an inferior
- * function call).
- */
- urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
- if (write_access) {
- if (*data != urbs_end) {
- if (in_syscall(pt))
- convert_to_non_syscall(target,
- pt,
- cfm);
- /*
- * Simulate user-level write
- * of ar.bsp:
- */
- pt->loadrs = 0;
- pt->ar_bspstore = *data;
- }
- } else
- *data = urbs_end;
- return 0;
- case ELF_AR_BSPSTORE_OFFSET:
- ptr = &pt->ar_bspstore;
- break;
- case ELF_AR_RNAT_OFFSET:
- ptr = &pt->ar_rnat;
- break;
- case ELF_AR_CCV_OFFSET:
- ptr = &pt->ar_ccv;
- break;
- case ELF_AR_UNAT_OFFSET:
- ptr = &pt->ar_unat;
- break;
- case ELF_AR_FPSR_OFFSET:
- ptr = &pt->ar_fpsr;
- break;
- case ELF_AR_PFS_OFFSET:
- ptr = &pt->ar_pfs;
- break;
- case ELF_AR_LC_OFFSET:
- return unw_access_ar(info, UNW_AR_LC, data,
- write_access);
- case ELF_AR_EC_OFFSET:
- return unw_access_ar(info, UNW_AR_EC, data,
- write_access);
- case ELF_AR_CSD_OFFSET:
- ptr = &pt->ar_csd;
- break;
- case ELF_AR_SSD_OFFSET:
- ptr = &pt->ar_ssd;
- }
- } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
- switch (addr) {
- case ELF_CR_IIP_OFFSET:
- ptr = &pt->cr_iip;
- break;
- case ELF_CFM_OFFSET:
- urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
- if (write_access) {
- if (((cfm ^ *data) & PFM_MASK) != 0) {
- if (in_syscall(pt))
- convert_to_non_syscall(target,
- pt,
- cfm);
- pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
- | (*data & PFM_MASK));
- }
- } else
- *data = cfm;
- return 0;
- case ELF_CR_IPSR_OFFSET:
- if (write_access) {
- unsigned long tmp = *data;
- /* psr.ri==3 is a reserved value: SDM 2:25 */
- if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
- tmp &= ~IA64_PSR_RI;
- pt->cr_ipsr = ((tmp & IPSR_MASK)
- | (pt->cr_ipsr & ~IPSR_MASK));
- } else
- *data = (pt->cr_ipsr & IPSR_MASK);
- return 0;
- }
- } else if (addr == ELF_NAT_OFFSET)
- return access_nat_bits(target, pt, info,
- data, write_access);
- else if (addr == ELF_PR_OFFSET)
- ptr = &pt->pr;
- else
- return -1;
-
- if (write_access)
- *ptr = *data;
- else
- *data = *ptr;
-
- return 0;
-}
-
-static int
-access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
- unsigned long addr, unsigned long *data, int write_access)
-{
- if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
- return access_elf_gpreg(target, info, addr, data, write_access);
- else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
- return access_elf_breg(target, info, addr, data, write_access);
- else
- return access_elf_areg(target, info, addr, data, write_access);
-}
-
-struct regset_membuf {
- struct membuf to;
- int ret;
-};
-
-static void do_gpregs_get(struct unw_frame_info *info, void *arg)
-{
- struct regset_membuf *dst = arg;
- struct membuf to = dst->to;
- unsigned int n;
- elf_greg_t reg;
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- /*
- * coredump format:
- * r0-r31
- * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
- * predicate registers (p0-p63)
- * b0-b7
- * ip cfm user-mask
- * ar.rsc ar.bsp ar.bspstore ar.rnat
- * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
- */
-
-
- /* Skip r0 */
- membuf_zero(&to, 8);
- for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) {
- if (access_elf_reg(info->task, info, n, &reg, 0) < 0) {
- dst->ret = -EIO;
- return;
- }
- membuf_store(&to, reg);
- }
-}
-
-static void do_gpregs_set(struct unw_frame_info *info, void *arg)
-{
- struct regset_getset *dst = arg;
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- if (!dst->count)
- return;
- /* Skip r0 */
- if (dst->pos < ELF_GR_OFFSET(1)) {
- user_regset_copyin_ignore(&dst->pos, &dst->count,
- &dst->u.set.kbuf, &dst->u.set.ubuf,
- 0, ELF_GR_OFFSET(1));
- dst->ret = 0;
- }
-
- while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
- unsigned int n, from, to;
- elf_greg_t tmp[16];
-
- from = dst->pos;
- to = from + sizeof(tmp);
- if (to > ELF_AR_END_OFFSET)
- to = ELF_AR_END_OFFSET;
- /* get up to 16 values */
- dst->ret = user_regset_copyin(&dst->pos, &dst->count,
- &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
- from, to);
- if (dst->ret)
- return;
- /* now copy them into registers */
- for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
- if (access_elf_reg(dst->target, info, from,
- &tmp[n], 1) < 0) {
- dst->ret = -EIO;
- return;
- }
- }
-}
-
-#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
-
-static void do_fpregs_get(struct unw_frame_info *info, void *arg)
-{
- struct task_struct *task = info->task;
- struct regset_membuf *dst = arg;
- struct membuf to = dst->to;
- elf_fpreg_t reg;
- unsigned int n;
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- /* Skip pos 0 and 1 */
- membuf_zero(&to, 2 * sizeof(elf_fpreg_t));
-
- /* fr2-fr31 */
- for (n = 2; to.left && n < 32; n++) {
- if (unw_get_fr(info, n, &reg)) {
- dst->ret = -EIO;
- return;
- }
- membuf_write(&to, &reg, sizeof(reg));
- }
-
- /* fph */
- if (!to.left)
- return;
-
- ia64_flush_fph(task);
- if (task->thread.flags & IA64_THREAD_FPH_VALID)
- membuf_write(&to, &task->thread.fph, 96 * sizeof(reg));
- else
- membuf_zero(&to, 96 * sizeof(reg));
-}
-
-static void do_fpregs_set(struct unw_frame_info *info, void *arg)
-{
- struct regset_getset *dst = arg;
- elf_fpreg_t fpreg, tmp[30];
- int index, start, end;
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- /* Skip pos 0 and 1 */
- if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
- user_regset_copyin_ignore(&dst->pos, &dst->count,
- &dst->u.set.kbuf, &dst->u.set.ubuf,
- 0, ELF_FP_OFFSET(2));
- dst->ret = 0;
- if (dst->count == 0)
- return;
- }
-
- /* fr2-fr31 */
- if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
- start = dst->pos;
- end = min(((unsigned int)ELF_FP_OFFSET(32)),
- dst->pos + dst->count);
- dst->ret = user_regset_copyin(&dst->pos, &dst->count,
- &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
- ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
- if (dst->ret)
- return;
-
- if (start & 0xF) { /* only write high part */
- if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
- &fpreg)) {
- dst->ret = -EIO;
- return;
- }
- tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
- = fpreg.u.bits[0];
- start &= ~0xFUL;
- }
- if (end & 0xF) { /* only write low part */
- if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
- &fpreg)) {
- dst->ret = -EIO;
- return;
- }
- tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
- = fpreg.u.bits[1];
- end = (end + 0xF) & ~0xFUL;
- }
-
- for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
- index = start / sizeof(elf_fpreg_t);
- if (unw_set_fr(info, index, tmp[index - 2])) {
- dst->ret = -EIO;
- return;
- }
- }
- if (dst->ret || dst->count == 0)
- return;
- }
-
- /* fph */
- if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
- ia64_sync_fph(dst->target);
- dst->ret = user_regset_copyin(&dst->pos, &dst->count,
- &dst->u.set.kbuf,
- &dst->u.set.ubuf,
- &dst->target->thread.fph,
- ELF_FP_OFFSET(32), -1);
- }
-}
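
The split handling above exists because each elf_fpreg_t is 16 bytes
while positions arrive at 8-byte granularity (e.g. via access_uarea()
below), so a write can begin or end halfway through a register; the
untouched half is then read back through the unwinder first. A worked
example of the arithmetic (illustrative values only):

	size_t start = 0x28;		/* caller's write begins here       */
	size_t index = start / 16;	/* sizeof(elf_fpreg_t) == 16 -> fr2 */
	/* start & 0xF == 8: only fr2's high half (u.bits[1]) is supplied,
	 * so its low half (u.bits[0]) is back-filled from unw_get_fr()
	 * before the whole register is stored with unw_set_fr().        */
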
-
-static void
-unwind_and_call(void (*call)(struct unw_frame_info *, void *),
- struct task_struct *target, void *data)
-{
- if (target == current)
- unw_init_running(call, data);
- else {
- struct unw_frame_info info;
- memset(&info, 0, sizeof(info));
- unw_init_from_blocked_task(&info, target);
- (*call)(&info, data);
- }
-}
-
-static int
-do_regset_call(void (*call)(struct unw_frame_info *, void *),
- struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- struct regset_getset info = { .target = target, .regset = regset,
- .pos = pos, .count = count,
- .u.set = { .kbuf = kbuf, .ubuf = ubuf },
- .ret = 0 };
- unwind_and_call(call, target, &info);
- return info.ret;
-}
-
-static int
-gpregs_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- struct regset_membuf info = {.to = to};
- unwind_and_call(do_gpregs_get, target, &info);
- return info.ret;
-}
-
-static int gpregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return do_regset_call(do_gpregs_set, target, regset, pos, count,
- kbuf, ubuf);
-}
-
-static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
-{
- do_sync_rbs(info, ia64_sync_user_rbs);
-}
-
-/*
- * This is called to write back the register backing store.
- * ptrace does this before it stops, so that a tracer reading the user
- * memory after the thread stops will get the current register data.
- */
-static int
-gpregs_writeback(struct task_struct *target,
- const struct user_regset *regset,
- int now)
-{
- if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
- return 0;
- set_notify_resume(target);
- return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
- NULL, NULL);
-}
-
-static int
-fpregs_active(struct task_struct *target, const struct user_regset *regset)
-{
- return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
-}
-
-static int fpregs_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- struct regset_membuf info = {.to = to};
- unwind_and_call(do_fpregs_get, target, &info);
- return info.ret;
-}
-
-static int fpregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return do_regset_call(do_fpregs_set, target, regset, pos, count,
- kbuf, ubuf);
-}
-
-static int
-access_uarea(struct task_struct *child, unsigned long addr,
- unsigned long *data, int write_access)
-{
- unsigned int pos = -1; /* an invalid value */
- unsigned long *ptr, regnum;
-
- if ((addr & 0x7) != 0) {
- dprintk("ptrace: unaligned register address 0x%lx\n", addr);
- return -1;
- }
- if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
- (addr >= PT_R7 + 8 && addr < PT_B1) ||
- (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
- (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
- dprintk("ptrace: rejecting access to register "
- "address 0x%lx\n", addr);
- return -1;
- }
-
- switch (addr) {
- case PT_F32 ... (PT_F127 + 15):
- pos = addr - PT_F32 + ELF_FP_OFFSET(32);
- break;
- case PT_F2 ... (PT_F5 + 15):
- pos = addr - PT_F2 + ELF_FP_OFFSET(2);
- break;
- case PT_F10 ... (PT_F31 + 15):
- pos = addr - PT_F10 + ELF_FP_OFFSET(10);
- break;
- case PT_F6 ... (PT_F9 + 15):
- pos = addr - PT_F6 + ELF_FP_OFFSET(6);
- break;
- }
-
- if (pos != -1) {
- unsigned reg = pos / sizeof(elf_fpreg_t);
- int which_half = (pos / sizeof(unsigned long)) & 1;
-
- if (reg < 32) { /* fr2-fr31 */
- struct unw_frame_info info;
- elf_fpreg_t fpreg;
-
- memset(&info, 0, sizeof(info));
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0)
- return 0;
-
- if (unw_get_fr(&info, reg, &fpreg))
- return -1;
- if (write_access) {
- fpreg.u.bits[which_half] = *data;
- if (unw_set_fr(&info, reg, fpreg))
- return -1;
- } else {
- *data = fpreg.u.bits[which_half];
- }
- } else { /* fph */
- elf_fpreg_t *p = &child->thread.fph[reg - 32];
- unsigned long *bits = &p->u.bits[which_half];
-
- ia64_sync_fph(child);
- if (write_access)
- *bits = *data;
- else if (child->thread.flags & IA64_THREAD_FPH_VALID)
- *data = *bits;
- else
- *data = 0;
- }
- return 0;
- }
-
- switch (addr) {
- case PT_NAT_BITS:
- pos = ELF_NAT_OFFSET;
- break;
- case PT_R4 ... PT_R7:
- pos = addr - PT_R4 + ELF_GR_OFFSET(4);
- break;
- case PT_B1 ... PT_B5:
- pos = addr - PT_B1 + ELF_BR_OFFSET(1);
- break;
- case PT_AR_EC:
- pos = ELF_AR_EC_OFFSET;
- break;
- case PT_AR_LC:
- pos = ELF_AR_LC_OFFSET;
- break;
- case PT_CR_IPSR:
- pos = ELF_CR_IPSR_OFFSET;
- break;
- case PT_CR_IIP:
- pos = ELF_CR_IIP_OFFSET;
- break;
- case PT_CFM:
- pos = ELF_CFM_OFFSET;
- break;
- case PT_AR_UNAT:
- pos = ELF_AR_UNAT_OFFSET;
- break;
- case PT_AR_PFS:
- pos = ELF_AR_PFS_OFFSET;
- break;
- case PT_AR_RSC:
- pos = ELF_AR_RSC_OFFSET;
- break;
- case PT_AR_RNAT:
- pos = ELF_AR_RNAT_OFFSET;
- break;
- case PT_AR_BSPSTORE:
- pos = ELF_AR_BSPSTORE_OFFSET;
- break;
- case PT_PR:
- pos = ELF_PR_OFFSET;
- break;
- case PT_B6:
- pos = ELF_BR_OFFSET(6);
- break;
- case PT_AR_BSP:
- pos = ELF_AR_BSP_OFFSET;
- break;
- case PT_R1 ... PT_R3:
- pos = addr - PT_R1 + ELF_GR_OFFSET(1);
- break;
- case PT_R12 ... PT_R15:
- pos = addr - PT_R12 + ELF_GR_OFFSET(12);
- break;
- case PT_R8 ... PT_R11:
- pos = addr - PT_R8 + ELF_GR_OFFSET(8);
- break;
- case PT_R16 ... PT_R31:
- pos = addr - PT_R16 + ELF_GR_OFFSET(16);
- break;
- case PT_AR_CCV:
- pos = ELF_AR_CCV_OFFSET;
- break;
- case PT_AR_FPSR:
- pos = ELF_AR_FPSR_OFFSET;
- break;
- case PT_B0:
- pos = ELF_BR_OFFSET(0);
- break;
- case PT_B7:
- pos = ELF_BR_OFFSET(7);
- break;
- case PT_AR_CSD:
- pos = ELF_AR_CSD_OFFSET;
- break;
- case PT_AR_SSD:
- pos = ELF_AR_SSD_OFFSET;
- break;
- }
-
- if (pos != -1) {
- struct unw_frame_info info;
-
- memset(&info, 0, sizeof(info));
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0)
- return 0;
-
- return access_elf_reg(child, &info, pos, data, write_access);
- }
-
- /* access debug registers */
- if (addr >= PT_IBR) {
- regnum = (addr - PT_IBR) >> 3;
- ptr = &child->thread.ibr[0];
- } else {
- regnum = (addr - PT_DBR) >> 3;
- ptr = &child->thread.dbr[0];
- }
-
- if (regnum >= 8) {
- dprintk("ptrace: rejecting access to register "
- "address 0x%lx\n", addr);
- return -1;
- }
-
- if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
- child->thread.flags |= IA64_THREAD_DBG_VALID;
- memset(child->thread.dbr, 0,
- sizeof(child->thread.dbr));
- memset(child->thread.ibr, 0,
- sizeof(child->thread.ibr));
- }
-
- ptr += regnum;
-
- if ((regnum & 1) && write_access) {
- /* don't let the user set kernel-level breakpoints: */
- *ptr = *data & ~(7UL << 56);
- return 0;
- }
- if (write_access)
- *ptr = *data;
- else
- *data = *ptr;
- return 0;
-}
-
-static const struct user_regset native_regsets[] = {
- {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
- .regset_get = gpregs_get, .set = gpregs_set,
- .writeback = gpregs_writeback
- },
- {
- .core_note_type = NT_PRFPREG,
- .n = ELF_NFPREG,
- .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
- .regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active
- },
-};
-
-static const struct user_regset_view user_ia64_view = {
- .name = "ia64",
- .e_machine = EM_IA_64,
- .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
-{
- return &user_ia64_view;
-}
-
-struct syscall_get_args {
- unsigned int i;
- unsigned int n;
- unsigned long *args;
- struct pt_regs *regs;
-};
-
-static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
-{
- struct syscall_get_args *args = data;
- struct pt_regs *pt = args->regs;
- unsigned long *krbs, cfm, ndirty, nlocals, nouts;
- int i, count;
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- /*
- * We get here via a few paths:
- * - break instruction: cfm is shared with caller.
- * syscall args are in out= regs, locals are non-empty.
-	 * - epc instruction: cfm is set by br.call
- * locals don't exist.
- *
- * For both cases arguments are reachable in cfm.sof - cfm.sol.
- * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
- */
- cfm = pt->cr_ifs;
- nlocals = (cfm >> 7) & 0x7f; /* aka sol */
- nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
- krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
- ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
-
- count = 0;
- if (in_syscall(pt))
- count = min_t(int, args->n, nouts);
-
- /* Iterate over outs. */
- for (i = 0; i < count; i++) {
- int j = ndirty + nlocals + i + args->i;
- args->args[i] = *ia64_rse_skip_regs(krbs, j);
- }
-
- while (i < args->n) {
- args->args[i] = 0;
- i++;
- }
-}
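
ia64_rse_skip_regs() above is not plain pointer arithmetic: the
register backing store holds a NaT-collection word every 63 register
slots, so skipping registers may also skip collection slots. A sketch
of the helper's logic (simplified to the forward, num_regs >= 0 case;
the real version lives in <asm/rse.h>):

	static inline unsigned long *
	rse_skip_regs_sketch(unsigned long *addr, long num_regs)
	{
		/* slot number of addr within its 64-slot group */
		long delta = ((((unsigned long) addr) >> 3) & 0x3f) + num_regs;

		/* one extra slot per NaT collection crossed */
		return addr + num_regs + (delta / 0x3f);
	}
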
-
-void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned long *args)
-{
- struct syscall_get_args data = {
- .i = 0,
- .n = 6,
- .args = args,
- .regs = regs,
- };
-
- if (task == current)
- unw_init_running(syscall_get_args_cb, &data);
- else {
- struct unw_frame_info ufi;
- memset(&ufi, 0, sizeof(ufi));
- unw_init_from_blocked_task(&ufi, task);
- syscall_get_args_cb(&ufi, &data);
- }
-}
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
deleted file mode 100644
index 527a7b896a..0000000000
--- a/arch/ia64/kernel/relocate_kernel.S
+++ /dev/null
@@ -1,321 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/ia64/kernel/relocate_kernel.S
- *
- * Relocate kexec'able kernel and start it
- *
- * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
- * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
- * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
- */
-#include <linux/pgtable.h>
-#include <asm/asmmacro.h>
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/mca_asm.h>
-
- /* Must be relocatable PIC code callable as a C function
- */
-GLOBAL_ENTRY(relocate_new_kernel)
- .prologue
- alloc r31=ar.pfs,4,0,0,0
- .body
-.reloc_entry:
-{
-	rsm psr.i | psr.ic
- mov r2=ip
-}
- ;;
-{
- flushrs // must be first insn in group
- srlz.i
-}
- ;;
- dep r2=0,r2,61,3 //to physical address
- ;;
- //first switch to physical mode
- add r3=1f-.reloc_entry, r2
- movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
- mov ar.rsc=0 // put RSE in enforced lazy mode
- ;;
- add sp=(memory_stack_end - 16 - .reloc_entry),r2
- add r8=(register_stack - .reloc_entry),r2
- ;;
- mov r18=ar.rnat
- mov ar.bspstore=r8
- ;;
- mov cr.ipsr=r16
- mov cr.iip=r3
- mov cr.ifs=r0
- srlz.i
- ;;
- mov ar.rnat=r18
-	rfi				// note: this unmasks MCA/INIT (psr.mc)
- ;;
-1:
- //physical mode code begin
- mov b6=in1
- dep r28=0,in2,61,3 //to physical address
-
- // purge all TC entries
-#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
- ;;
- addl r17=O(PTCE_STRIDE),r2
- addl r2=O(PTCE_BASE),r2
- ;;
- ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
- ld4 r19=[r2],4 // r19=ptce_count[0]
- ld4 r21=[r17],4 // r21=ptce_stride[0]
- ;;
- ld4 r20=[r2] // r20=ptce_count[1]
- ld4 r22=[r17] // r22=ptce_stride[1]
- mov r24=r0
- ;;
- adds r20=-1,r20
- ;;
-#undef O
-2:
- cmp.ltu p6,p7=r24,r19
-(p7) br.cond.dpnt.few 4f
- mov ar.lc=r20
-3:
- ptc.e r18
- ;;
- add r18=r22,r18
- br.cloop.sptk.few 3b
- ;;
- add r18=r21,r18
- add r24=1,r24
- ;;
- br.sptk.few 2b
-4:
- srlz.i
- ;;
- // purge TR entry for kernel text and data
- movl r16=KERNEL_START
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- ;;
- ptr.i r16, r18
- ptr.d r16, r18
- ;;
- srlz.i
- ;;
-
- // purge TR entry for pal code
- mov r16=in3
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i r16,r18
- ;;
- srlz.i
- ;;
-
- // purge TR entry for stack
- mov r16=IA64_KR(CURRENT_STACK)
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- movl r19=PAGE_OFFSET
- ;;
- add r16=r19,r16
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.i
- ;;
-
- //copy segments
- movl r16=PAGE_MASK
- mov r30=in0 // in0 is page_list
- br.sptk.few .dest_page
- ;;
-.loop:
- ld8 r30=[in0], 8;;
-.dest_page:
- tbit.z p0, p6=r30, 0;; // 0x1 dest page
-(p6) and r17=r30, r16
-(p6) br.cond.sptk.few .loop;;
-
- tbit.z p0, p6=r30, 1;; // 0x2 indirect page
-(p6) and in0=r30, r16
-(p6) br.cond.sptk.few .loop;;
-
- tbit.z p0, p6=r30, 2;; // 0x4 end flag
-(p6) br.cond.sptk.few .end_loop;;
-
- tbit.z p6, p0=r30, 3;; // 0x8 source page
-(p6) br.cond.sptk.few .loop
-
- and r18=r30, r16
-
- // simple copy page, may optimize later
- movl r14=PAGE_SIZE/8 - 1;;
- mov ar.lc=r14;;
-1:
- ld8 r14=[r18], 8;;
- st8 [r17]=r14;;
- fc.i r17
- add r17=8, r17
- br.ctop.sptk.few 1b
- br.sptk.few .loop
- ;;
-
-.end_loop:
- sync.i // for fc.i
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
- br.call.sptk.many b0=b6;;
-
-.align 32
-memory_stack:
- .fill 8192, 1, 0
-memory_stack_end:
-register_stack:
- .fill 8192, 1, 0
-register_stack_end:
-relocate_new_kernel_end:
-END(relocate_new_kernel)
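
The tbit tests in the copy loop above decode the generic kexec
page-list encoding; for reference, the flag bits are those defined in
<linux/kexec.h> (entry & PAGE_MASK is the page address):

	#define IND_DESTINATION	0x1	/* next destination page        */
	#define IND_INDIRECTION	0x2	/* next page of the list itself */
	#define IND_DONE	0x4	/* end of the page list         */
	#define IND_SOURCE	0x8	/* source page to copy          */
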
-
-.global relocate_new_kernel_size
-relocate_new_kernel_size:
- data8 relocate_new_kernel_end - relocate_new_kernel
-
-GLOBAL_ENTRY(ia64_dump_cpu_regs)
- .prologue
- alloc loc0=ar.pfs,1,2,0,0
- .body
- mov ar.rsc=0 // put RSE in enforced lazy mode
- add loc1=4*8, in0 // save r4 and r5 first
- ;;
-{
- flushrs // flush dirty regs to backing store
- srlz.i
-}
- st8 [loc1]=r4, 8
- ;;
- st8 [loc1]=r5, 8
- ;;
- add loc1=32*8, in0
- mov r4=ar.rnat
- ;;
- st8 [in0]=r0, 8 // r0
- st8 [loc1]=r4, 8 // rnat
- mov r5=pr
- ;;
- st8 [in0]=r1, 8 // r1
- st8 [loc1]=r5, 8 // pr
- mov r4=b0
- ;;
- st8 [in0]=r2, 8 // r2
- st8 [loc1]=r4, 8 // b0
-	mov r5=b1
- ;;
- st8 [in0]=r3, 24 // r3
- st8 [loc1]=r5, 8 // b1
- mov r4=b2
- ;;
- st8 [in0]=r6, 8 // r6
- st8 [loc1]=r4, 8 // b2
- mov r5=b3
- ;;
- st8 [in0]=r7, 8 // r7
- st8 [loc1]=r5, 8 // b3
- mov r4=b4
- ;;
- st8 [in0]=r8, 8 // r8
- st8 [loc1]=r4, 8 // b4
- mov r5=b5
- ;;
- st8 [in0]=r9, 8 // r9
- st8 [loc1]=r5, 8 // b5
- mov r4=b6
- ;;
- st8 [in0]=r10, 8 // r10
-	st8 [loc1]=r4, 8		// b6
- mov r5=b7
- ;;
- st8 [in0]=r11, 8 // r11
- st8 [loc1]=r5, 8 // b7
- mov r4=b0
- ;;
- st8 [in0]=r12, 8 // r12
- st8 [loc1]=r4, 8 // ip
- mov r5=loc0
- ;;
- st8 [in0]=r13, 8 // r13
- extr.u r5=r5, 0, 38 // ar.pfs.pfm
- mov r4=r0 // user mask
- ;;
- st8 [in0]=r14, 8 // r14
- st8 [loc1]=r5, 8 // cfm
- ;;
- st8 [in0]=r15, 8 // r15
- st8 [loc1]=r4, 8 // user mask
- mov r5=ar.rsc
- ;;
- st8 [in0]=r16, 8 // r16
- st8 [loc1]=r5, 8 // ar.rsc
- mov r4=ar.bsp
- ;;
- st8 [in0]=r17, 8 // r17
- st8 [loc1]=r4, 8 // ar.bsp
- mov r5=ar.bspstore
- ;;
- st8 [in0]=r18, 8 // r18
- st8 [loc1]=r5, 8 // ar.bspstore
- mov r4=ar.rnat
- ;;
- st8 [in0]=r19, 8 // r19
- st8 [loc1]=r4, 8 // ar.rnat
- mov r5=ar.ccv
- ;;
- st8 [in0]=r20, 8 // r20
- st8 [loc1]=r5, 8 // ar.ccv
- mov r4=ar.unat
- ;;
- st8 [in0]=r21, 8 // r21
- st8 [loc1]=r4, 8 // ar.unat
- mov r5 = ar.fpsr
- ;;
- st8 [in0]=r22, 8 // r22
- st8 [loc1]=r5, 8 // ar.fpsr
- mov r4 = ar.unat
- ;;
- st8 [in0]=r23, 8 // r23
- st8 [loc1]=r4, 8 // unat
- mov r5 = ar.fpsr
- ;;
- st8 [in0]=r24, 8 // r24
- st8 [loc1]=r5, 8 // fpsr
- mov r4 = ar.pfs
- ;;
- st8 [in0]=r25, 8 // r25
- st8 [loc1]=r4, 8 // ar.pfs
- mov r5 = ar.lc
- ;;
- st8 [in0]=r26, 8 // r26
- st8 [loc1]=r5, 8 // ar.lc
- mov r4 = ar.ec
- ;;
- st8 [in0]=r27, 8 // r27
- st8 [loc1]=r4, 8 // ar.ec
- mov r5 = ar.csd
- ;;
- st8 [in0]=r28, 8 // r28
- st8 [loc1]=r5, 8 // ar.csd
- mov r4 = ar.ssd
- ;;
- st8 [in0]=r29, 8 // r29
- st8 [loc1]=r4, 8 // ar.ssd
- ;;
- st8 [in0]=r30, 8 // r30
- ;;
- st8 [in0]=r31, 8 // r31
- mov ar.pfs=loc0
- ;;
- br.ret.sptk.many rp
-END(ia64_dump_cpu_regs)
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
deleted file mode 100644
index e4f0705c02..0000000000
--- a/arch/ia64/kernel/sal.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * System Abstraction Layer (SAL) interface routines.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#include <asm/delay.h>
-#include <asm/page.h>
-#include <asm/sal.h>
-#include <asm/pal.h>
-#include <asm/xtp.h>
-
- __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
-unsigned long sal_platform_features;
-
-unsigned short sal_revision;
-unsigned short sal_version;
-
-#define SAL_MAJOR(x) ((x) >> 8)
-#define SAL_MINOR(x) ((x) & 0xff)
-
-static struct {
- void *addr; /* function entry point */
- void *gpval; /* gp value to use */
-} pdesc;
-
-static long
-default_handler (void)
-{
- return -1;
-}
-
-ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
-ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
-
-const char *
-ia64_sal_strerror (long status)
-{
- const char *str;
- switch (status) {
- case 0: str = "Call completed without error"; break;
- case 1: str = "Effect a warm boot of the system to complete "
- "the update"; break;
- case -1: str = "Not implemented"; break;
- case -2: str = "Invalid argument"; break;
- case -3: str = "Call completed with error"; break;
- case -4: str = "Virtual address not registered"; break;
- case -5: str = "No information available"; break;
- case -6: str = "Insufficient space to add the entry"; break;
- case -7: str = "Invalid entry_addr value"; break;
- case -8: str = "Invalid interrupt vector"; break;
- case -9: str = "Requested memory not available"; break;
- case -10: str = "Unable to write to the NVM device"; break;
- case -11: str = "Invalid partition type specified"; break;
- case -12: str = "Invalid NVM_Object id specified"; break;
- case -13: str = "NVM_Object already has the maximum number "
- "of partitions"; break;
- case -14: str = "Insufficient space in partition for the "
- "requested write sub-function"; break;
- case -15: str = "Insufficient data buffer space for the "
- "requested read record sub-function"; break;
- case -16: str = "Scratch buffer required for the write/delete "
- "sub-function"; break;
- case -17: str = "Insufficient space in the NVM_Object for the "
- "requested create sub-function"; break;
- case -18: str = "Invalid value specified in the partition_rec "
- "argument"; break;
- case -19: str = "Record oriented I/O not supported for this "
- "partition"; break;
- case -20: str = "Bad format of record to be written or "
- "required keyword variable not "
- "specified"; break;
- default: str = "Unknown SAL status code"; break;
- }
- return str;
-}
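
A sketch of how a caller might pair ia64_sal_strerror() with a SAL
call (illustrative only; cache_type 3 flushes both instruction and
data caches, as in check_sal_cache_flush() below):

	static void flush_caches_example(void)
	{
		struct ia64_sal_retval isrv;

		SAL_CALL(isrv, SAL_CACHE_FLUSH, 3, 0, 0, 0, 0, 0, 0);
		if (isrv.status)
			printk(KERN_ERR "SAL_CACHE_FLUSH: %s\n",
			       ia64_sal_strerror(isrv.status));
	}
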
-
-void __init
-ia64_sal_handler_init (void *entry_point, void *gpval)
-{
- /* fill in the SAL procedure descriptor and point ia64_sal to it: */
- pdesc.addr = entry_point;
- pdesc.gpval = gpval;
- ia64_sal = (ia64_sal_handler) &pdesc;
-}
-
-static void __init
-check_versions (struct ia64_sal_systab *systab)
-{
- sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
- sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
-
- /* Check for broken firmware */
- if ((sal_revision == SAL_VERSION_CODE(49, 29))
- && (sal_version == SAL_VERSION_CODE(49, 29)))
- {
- /*
-		 * Old firmware for zx2000 prototypes has this weird version number,
- * reset it to something sane.
- */
- sal_revision = SAL_VERSION_CODE(2, 8);
- sal_version = SAL_VERSION_CODE(0, 0);
- }
-}
-
-static void __init
-sal_desc_entry_point (void *p)
-{
- struct ia64_sal_desc_entry_point *ep = p;
- ia64_pal_handler_init(__va(ep->pal_proc));
- ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
-}
-
-#ifdef CONFIG_SMP
-static void __init
-set_smp_redirect (int flag)
-{
-#ifndef CONFIG_HOTPLUG_CPU
- if (no_int_routing)
- smp_int_redirect &= ~flag;
- else
- smp_int_redirect |= flag;
-#else
- /*
-	 * For CPU hotplug we don't want to do any chipset-supported
-	 * interrupt redirection, because it would require stopping all
-	 * interrupts and hard-binding each irq to a cpu. Later, when the
-	 * interrupt fires, the redirection hint would have to be set again
-	 * in the vector. This is cumbersome for something that the
-	 * user-mode irq balancer solves anyway.
- */
- no_int_routing=1;
- smp_int_redirect &= ~flag;
-#endif
-}
-#else
-#define set_smp_redirect(flag) do { } while (0)
-#endif
-
-static void __init
-sal_desc_platform_feature (void *p)
-{
- struct ia64_sal_desc_platform_feature *pf = p;
- sal_platform_features = pf->feature_mask;
-
- printk(KERN_INFO "SAL Platform features:");
- if (!sal_platform_features) {
- printk(" None\n");
- return;
- }
-
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
- printk(" BusLock");
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
- printk(" IRQ_Redirection");
- set_smp_redirect(SMP_IRQ_REDIRECTION);
- }
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
- printk(" IPI_Redirection");
- set_smp_redirect(SMP_IPI_REDIRECTION);
- }
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
- printk(" ITC_Drift");
- printk("\n");
-}
-
-#ifdef CONFIG_SMP
-static void __init
-sal_desc_ap_wakeup (void *p)
-{
- struct ia64_sal_desc_ap_wakeup *ap = p;
-
- switch (ap->mechanism) {
- case IA64_SAL_AP_EXTERNAL_INT:
- ap_wakeup_vector = ap->vector;
- printk(KERN_INFO "SAL: AP wakeup using external interrupt "
- "vector 0x%lx\n", ap_wakeup_vector);
- break;
- default:
- printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
- break;
- }
-}
-
-static void __init
-chk_nointroute_opt(void)
-{
- char *cp;
-
- for (cp = boot_command_line; *cp; ) {
- if (memcmp(cp, "nointroute", 10) == 0) {
- no_int_routing = 1;
-			printk("no_int_routing on\n");
- break;
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
-}
-
-#else
-static void __init sal_desc_ap_wakeup(void *p) { }
-#endif
-
-/*
- * HP rx5670 firmware polls for interrupts during SAL_CACHE_FLUSH by reading
- * cr.ivr, but it never writes cr.eoi. This leaves any interrupt marked as
- * "in-service" and masks other interrupts of equal or lower priority.
- *
- * HP internal defect reports: F1859, F2775, F3031.
- */
-static int sal_cache_flush_drops_interrupts;
-
-static int __init
-force_pal_cache_flush(char *str)
-{
- sal_cache_flush_drops_interrupts = 1;
- return 0;
-}
-early_param("force_pal_cache_flush", force_pal_cache_flush);
-
-void __init
-check_sal_cache_flush (void)
-{
- unsigned long flags;
- int cpu;
- u64 vector, cache_type = 3;
- struct ia64_sal_retval isrv;
-
- if (sal_cache_flush_drops_interrupts)
- return;
-
- cpu = get_cpu();
- local_irq_save(flags);
-
- /*
- * Send ourselves a timer interrupt, wait until it's reported, and see
- * if SAL_CACHE_FLUSH drops it.
- */
- ia64_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
-
- while (!ia64_get_irr(IA64_TIMER_VECTOR))
- cpu_relax();
-
- SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
-
- if (isrv.status)
-		printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n", isrv.status);
-
- if (ia64_get_irr(IA64_TIMER_VECTOR)) {
- vector = ia64_get_ivr();
- ia64_eoi();
- WARN_ON(vector != IA64_TIMER_VECTOR);
- } else {
- sal_cache_flush_drops_interrupts = 1;
- printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
- "PAL_CACHE_FLUSH will be used instead\n");
- ia64_eoi();
- }
-
- local_irq_restore(flags);
- put_cpu();
-}
-
-s64
-ia64_sal_cache_flush (u64 cache_type)
-{
- struct ia64_sal_retval isrv;
-
- if (sal_cache_flush_drops_interrupts) {
- unsigned long flags;
- u64 progress;
- s64 rc;
-
- progress = 0;
- local_irq_save(flags);
- rc = ia64_pal_cache_flush(cache_type,
- PAL_CACHE_FLUSH_INVALIDATE, &progress, NULL);
- local_irq_restore(flags);
- return rc;
- }
-
- SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
- return isrv.status;
-}
-EXPORT_SYMBOL_GPL(ia64_sal_cache_flush);
-
-void __init
-ia64_sal_init (struct ia64_sal_systab *systab)
-{
- char *p;
- int i;
-
- if (!systab) {
- printk(KERN_WARNING "Hmm, no SAL System Table.\n");
- return;
- }
-
- if (strncmp(systab->signature, "SST_", 4) != 0)
-		printk(KERN_ERR "bad signature in system table!\n");
-
- check_versions(systab);
-#ifdef CONFIG_SMP
- chk_nointroute_opt();
-#endif
-
-	/*
-	 * Revisions are coded in BCD, so %x does the job for us
-	 * (e.g. sal_revision 0x0320 prints as "3.20").
-	 */
- printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
- SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
- systab->oem_id, systab->product_id,
- systab->product_id[0] ? " " : "",
- SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
-
- p = (char *) (systab + 1);
- for (i = 0; i < systab->entry_count; i++) {
- /*
- * The first byte of each entry type contains the type
- * descriptor.
- */
- switch (*p) {
- case SAL_DESC_ENTRY_POINT:
- sal_desc_entry_point(p);
- break;
- case SAL_DESC_PLATFORM_FEATURE:
- sal_desc_platform_feature(p);
- break;
- case SAL_DESC_PTC:
- ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
- break;
- case SAL_DESC_AP_WAKEUP:
- sal_desc_ap_wakeup(p);
- break;
- }
- p += SAL_DESC_SIZE(*p);
- }
-
-}
-
-int
-ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
- u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall);
-
-int
-ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
- u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
- u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
- arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
-
-int
-ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
- u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
- u64 arg6, u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
- arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
-
-long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
- unsigned long *drift_info)
-{
- struct ia64_sal_retval isrv;
-
- SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
- *ticks_per_second = isrv.v0;
- *drift_info = isrv.v1;
- return isrv.status;
-}
-EXPORT_SYMBOL_GPL(ia64_sal_freq_base);
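
A minimal usage sketch (SAL_FREQ_BASE_PLATFORM is the selector from
<asm/sal.h>; ia64 time setup used this to calibrate the ITC):

	static void report_platform_freq(void)
	{
		unsigned long ticks_per_sec, drift;

		if (ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				       &ticks_per_sec, &drift) == 0)
			printk(KERN_INFO "platform base frequency: %lu Hz\n",
			       ticks_per_sec);
	}
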
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
deleted file mode 100644
index 03b632c568..0000000000
--- a/arch/ia64/kernel/salinfo.c
+++ /dev/null
@@ -1,646 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * salinfo.c
- *
- * Creates entries in /proc/sal for various system features.
- *
- * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved.
- * Copyright (c) 2003 Hewlett-Packard Co
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * 10/30/2001 jbarnes@sgi.com copied much of Stephane's palinfo
- * code to create this file
- * Oct 23 2003 kaos@sgi.com
- * Replace IPI with set_cpus_allowed() to read a record from the required cpu.
- * Redesign salinfo log processing to separate interrupt and user space
- * contexts.
- * Cache the record across multi-block reads from user space.
- * Support > 64 cpus.
- * Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module.
- *
- * Jan 28 2004 kaos@sgi.com
- * Periodically check for outstanding MCA or INIT records.
- *
- * Dec 5 2004 kaos@sgi.com
- * Standardize which records are cleared automatically.
- *
- * Aug 18 2005 kaos@sgi.com
- * mca.c may not pass a buffer, a NULL buffer just indicates that a new
- * record is available in SAL.
- * Replace some NR_CPUS by cpus_online, for hotplug cpu.
- *
- * Jan 5 2006 kaos@sgi.com
- * Handle hotplug cpus coming online.
- * Handle hotplug cpus going offline while they still have outstanding records.
- * Use the cpu_* macros consistently.
- * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
- * Modify the locking to make the test for "work to do" an atomic operation.
- */
-
-#include <linux/capability.h>
-#include <linux/cpu.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/timer.h>
-#include <linux/vmalloc.h>
-#include <linux/semaphore.h>
-
-#include <asm/sal.h>
-#include <linux/uaccess.h>
-
-MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
-MODULE_DESCRIPTION("/proc interface to IA-64 SAL features");
-MODULE_LICENSE("GPL");
-
-typedef struct {
- const char *name; /* name of the proc entry */
- unsigned long feature; /* feature bit */
- struct proc_dir_entry *entry; /* registered entry (removal) */
-} salinfo_entry_t;
-
-/*
- * List {name,feature} pairs for every entry in /proc/sal/<feature>
- * that this module exports
- */
-static const salinfo_entry_t salinfo_entries[]={
- { "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, },
- { "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, },
- { "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, },
- { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, },
-};
-
-#define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries)
-
-static char *salinfo_log_name[] = {
- "mca",
- "init",
- "cmc",
- "cpe",
-};
-
-static struct proc_dir_entry *salinfo_proc_entries[
- ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */
- ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */
- (2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */
- 1]; /* /proc/sal */
-
-/* Some records we get ourselves, some are accessed as saved data in buffers
- * that are owned by mca.c.
- */
-struct salinfo_data_saved {
- u8* buffer;
- u64 size;
- u64 id;
- int cpu;
-};
-
-/* State transitions. Actions are :-
- * Write "read <cpunum>" to the data file.
- * Write "clear <cpunum>" to the data file.
- * Write "oemdata <cpunum> <offset>" to the data file.
- * Read from the data file.
- * Close the data file.
- *
- * Start state is NO_DATA.
- *
- * NO_DATA
- * write "read <cpunum>" -> NO_DATA or LOG_RECORD.
- * write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
- *	write "oemdata <cpunum> <offset>" -> return -EINVAL.
- * read data -> return EOF.
- * close -> unchanged. Free record areas.
- *
- * LOG_RECORD
- * write "read <cpunum>" -> NO_DATA or LOG_RECORD.
- * write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
- *	write "oemdata <cpunum> <offset>" -> format the oem data, goto OEMDATA.
- * read data -> return the INIT/MCA/CMC/CPE record.
- * close -> unchanged. Keep record areas.
- *
- * OEMDATA
- * write "read <cpunum>" -> NO_DATA or LOG_RECORD.
- * write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
- *	write "oemdata <cpunum> <offset>" -> format the oem data, goto OEMDATA.
- * read data -> return the formatted oemdata.
- * close -> unchanged. Keep record areas.
- *
- * Closing the data file does not change the state. This allows shell scripts
- * to manipulate salinfo data, each shell redirection opens the file, does one
- * action then closes it again. The record areas are only freed at close when
- * the state is NO_DATA.
- */
-enum salinfo_state {
- STATE_NO_DATA,
- STATE_LOG_RECORD,
- STATE_OEMDATA,
-};
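
The state machine is driven entirely through reads and writes on
/proc/sal/<type>/data. A user-space sketch for one MCA record on a
hypothetical cpu 0 (error handling omitted; pread() is used so the
reads don't share a file offset with the writes):

	#include <fcntl.h>
	#include <unistd.h>

	static void dump_one_mca_record(void)
	{
		char buf[8192];
		int fd = open("/proc/sal/mca/data", O_RDWR);

		write(fd, "read 0", 6);		/* NO_DATA -> LOG_RECORD    */
		pread(fd, buf, sizeof(buf), 0);	/* the raw SAL error record */
		write(fd, "clear 0", 7);	/* release it, rescan SAL   */
		close(fd);
	}
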
-
-struct salinfo_data {
- cpumask_t cpu_event; /* which cpus have outstanding events */
- wait_queue_head_t read_wait;
- u8 *log_buffer;
- u64 log_size;
- u8 *oemdata; /* decoded oem data */
- u64 oemdata_size;
- int open; /* single-open to prevent races */
- u8 type;
- u8 saved_num; /* using a saved record? */
- enum salinfo_state state :8; /* processing state */
- u8 padding;
- int cpu_check; /* next CPU to check */
- struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */
-};
-
-static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)];
-
-static DEFINE_SPINLOCK(data_lock);
-static DEFINE_SPINLOCK(data_saved_lock);
-
-/** salinfo_platform_oemdata - optional callback to decode oemdata from an error
- * record.
- * @sect_header: pointer to the start of the section to decode.
- * @oemdata: returns vmalloc area containing the decoded output.
- * @oemdata_size: returns length of decoded output (strlen).
- *
- * Description: If user space asks for oem data to be decoded by the kernel
- * and/or prom and the platform has set salinfo_platform_oemdata to the address
- * of a platform specific routine then call that routine. salinfo_platform_oemdata
- * vmalloc's and formats its output area, returning the address of the text
- * and its strlen. Returns 0 for success, -ve for error. The callback is
- * invoked on the cpu that generated the error record.
- */
-int (*salinfo_platform_oemdata)(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size);
-
-struct salinfo_platform_oemdata_parms {
- const u8 *efi_guid;
- u8 **oemdata;
- u64 *oemdata_size;
-};
-
-static long
-salinfo_platform_oemdata_cpu(void *context)
-{
- struct salinfo_platform_oemdata_parms *parms = context;
-
- return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
-}
-
-static void
-shift1_data_saved (struct salinfo_data *data, int shift)
-{
- memcpy(data->data_saved+shift, data->data_saved+shift+1,
- (ARRAY_SIZE(data->data_saved) - (shift+1)) * sizeof(data->data_saved[0]));
- memset(data->data_saved + ARRAY_SIZE(data->data_saved) - 1, 0,
- sizeof(data->data_saved[0]));
-}
-
-/* This routine is invoked in interrupt context. Note: mca.c enables
- * interrupts before calling this code for CMC/CPE. MCA and INIT events are
- * not irq safe; do not call any routines that use spinlocks, as they may deadlock.
- * MCA and INIT records are recorded, a timer event will look for any
- * outstanding events and wake up the user space code.
- *
- * The buffer passed from mca.c points to the output from ia64_log_get. This is
- * a persistent buffer but its contents can change between the interrupt and
- * when user space processes the record. Save the record id to identify
- * changes. If the buffer is NULL then just update the bitmap.
- */
-void
-salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
-{
- struct salinfo_data *data = salinfo_data + type;
- struct salinfo_data_saved *data_saved;
- unsigned long flags = 0;
- int i;
- int saved_size = ARRAY_SIZE(data->data_saved);
-
- BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
-
- if (irqsafe)
- spin_lock_irqsave(&data_saved_lock, flags);
- if (buffer) {
- for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
- if (!data_saved->buffer)
- break;
- }
- if (i == saved_size) {
- if (!data->saved_num) {
- shift1_data_saved(data, 0);
- data_saved = data->data_saved + saved_size - 1;
- } else
- data_saved = NULL;
- }
- if (data_saved) {
- data_saved->cpu = smp_processor_id();
- data_saved->id = ((sal_log_record_header_t *)buffer)->id;
- data_saved->size = size;
- data_saved->buffer = buffer;
- }
- }
- cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
- if (irqsafe) {
- wake_up_interruptible(&data->read_wait);
- spin_unlock_irqrestore(&data_saved_lock, flags);
- }
-}
-
-/* Check for outstanding MCA/INIT records every minute (arbitrary) */
-#define SALINFO_TIMER_DELAY (60*HZ)
-static struct timer_list salinfo_timer;
-extern void ia64_mlogbuf_dump(void);
-
-static void
-salinfo_timeout_check(struct salinfo_data *data)
-{
- if (!data->open)
- return;
- if (!cpumask_empty(&data->cpu_event))
- wake_up_interruptible(&data->read_wait);
-}
-
-static void
-salinfo_timeout(struct timer_list *unused)
-{
- ia64_mlogbuf_dump();
- salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
- salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
- salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
- add_timer(&salinfo_timer);
-}
-
-static int
-salinfo_event_open(struct inode *inode, struct file *file)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
-static ssize_t
-salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- struct salinfo_data *data = pde_data(file_inode(file));
- char cmd[32];
- size_t size;
- int i, n, cpu = -1;
-
-retry:
- if (cpumask_empty(&data->cpu_event)) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
- if (wait_event_interruptible(data->read_wait,
- !cpumask_empty(&data->cpu_event)))
- return -EINTR;
- }
-
- n = data->cpu_check;
- for (i = 0; i < nr_cpu_ids; i++) {
- if (cpumask_test_cpu(n, &data->cpu_event)) {
- if (!cpu_online(n)) {
- cpumask_clear_cpu(n, &data->cpu_event);
- continue;
- }
- cpu = n;
- break;
- }
- if (++n == nr_cpu_ids)
- n = 0;
- }
-
- if (cpu == -1)
- goto retry;
-
- ia64_mlogbuf_dump();
-
- /* for next read, start checking at next CPU */
- data->cpu_check = cpu;
- if (++data->cpu_check == nr_cpu_ids)
- data->cpu_check = 0;
-
- snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
-
- size = strlen(cmd);
- if (size > count)
- size = count;
- if (copy_to_user(buffer, cmd, size))
- return -EFAULT;
-
- return size;
-}
-
-static const struct proc_ops salinfo_event_proc_ops = {
- .proc_open = salinfo_event_open,
- .proc_read = salinfo_event_read,
- .proc_lseek = noop_llseek,
-};
-
-static int
-salinfo_log_open(struct inode *inode, struct file *file)
-{
- struct salinfo_data *data = pde_data(inode);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- spin_lock(&data_lock);
- if (data->open) {
- spin_unlock(&data_lock);
- return -EBUSY;
- }
- data->open = 1;
- spin_unlock(&data_lock);
-
- if (data->state == STATE_NO_DATA &&
- !(data->log_buffer = vmalloc(ia64_sal_get_state_info_size(data->type)))) {
- data->open = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-salinfo_log_release(struct inode *inode, struct file *file)
-{
- struct salinfo_data *data = pde_data(inode);
-
- if (data->state == STATE_NO_DATA) {
- vfree(data->log_buffer);
- vfree(data->oemdata);
- data->log_buffer = NULL;
- data->oemdata = NULL;
- }
- spin_lock(&data_lock);
- data->open = 0;
- spin_unlock(&data_lock);
- return 0;
-}
-
-static long
-salinfo_log_read_cpu(void *context)
-{
- struct salinfo_data *data = context;
- sal_log_record_header_t *rh;
- data->log_size = ia64_sal_get_state_info(data->type, (u64 *) data->log_buffer);
- rh = (sal_log_record_header_t *)(data->log_buffer);
- /* Clear corrected errors as they are read from SAL */
- if (rh->severity == sal_log_severity_corrected)
- ia64_sal_clear_state_info(data->type);
- return 0;
-}
-
-static void
-salinfo_log_new_read(int cpu, struct salinfo_data *data)
-{
- struct salinfo_data_saved *data_saved;
- unsigned long flags;
- int i;
- int saved_size = ARRAY_SIZE(data->data_saved);
-
- data->saved_num = 0;
- spin_lock_irqsave(&data_saved_lock, flags);
-retry:
- for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
- if (data_saved->buffer && data_saved->cpu == cpu) {
- sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer);
- data->log_size = data_saved->size;
- memcpy(data->log_buffer, rh, data->log_size);
- barrier(); /* id check must not be moved */
- if (rh->id == data_saved->id) {
- data->saved_num = i+1;
- break;
- }
- /* saved record changed by mca.c since interrupt, discard it */
- shift1_data_saved(data, i);
- goto retry;
- }
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
-
- if (!data->saved_num)
- work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
- if (!data->log_size) {
- data->state = STATE_NO_DATA;
- cpumask_clear_cpu(cpu, &data->cpu_event);
- } else {
- data->state = STATE_LOG_RECORD;
- }
-}
-
-static ssize_t
-salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- struct salinfo_data *data = pde_data(file_inode(file));
- u8 *buf;
- u64 bufsize;
-
- if (data->state == STATE_LOG_RECORD) {
- buf = data->log_buffer;
- bufsize = data->log_size;
- } else if (data->state == STATE_OEMDATA) {
- buf = data->oemdata;
- bufsize = data->oemdata_size;
- } else {
- buf = NULL;
- bufsize = 0;
- }
- return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
-}
-
-static long
-salinfo_log_clear_cpu(void *context)
-{
- struct salinfo_data *data = context;
-
- ia64_sal_clear_state_info(data->type);
- return 0;
-}
-
-static int
-salinfo_log_clear(struct salinfo_data *data, int cpu)
-{
- sal_log_record_header_t *rh;
- unsigned long flags;
- spin_lock_irqsave(&data_saved_lock, flags);
- data->state = STATE_NO_DATA;
- if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
- spin_unlock_irqrestore(&data_saved_lock, flags);
- return 0;
- }
- cpumask_clear_cpu(cpu, &data->cpu_event);
- if (data->saved_num) {
- shift1_data_saved(data, data->saved_num - 1);
- data->saved_num = 0;
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
- rh = (sal_log_record_header_t *)(data->log_buffer);
- /* Corrected errors have already been cleared from SAL */
- if (rh->severity != sal_log_severity_corrected)
- work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
- /* clearing a record may make a new record visible */
- salinfo_log_new_read(cpu, data);
- if (data->state == STATE_LOG_RECORD) {
- spin_lock_irqsave(&data_saved_lock, flags);
- cpumask_set_cpu(cpu, &data->cpu_event);
- wake_up_interruptible(&data->read_wait);
- spin_unlock_irqrestore(&data_saved_lock, flags);
- }
- return 0;
-}
-
-static ssize_t
-salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
-{
- struct salinfo_data *data = pde_data(file_inode(file));
- char cmd[32];
- size_t size;
- u32 offset;
- int cpu;
-
- size = sizeof(cmd);
- if (count < size)
- size = count;
- if (copy_from_user(cmd, buffer, size))
- return -EFAULT;
-
- if (sscanf(cmd, "read %d", &cpu) == 1) {
- salinfo_log_new_read(cpu, data);
- } else if (sscanf(cmd, "clear %d", &cpu) == 1) {
- int ret;
- if ((ret = salinfo_log_clear(data, cpu)))
- count = ret;
- } else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) {
- if (data->state != STATE_LOG_RECORD && data->state != STATE_OEMDATA)
- return -EINVAL;
- if (offset > data->log_size - sizeof(efi_guid_t))
- return -EINVAL;
- data->state = STATE_OEMDATA;
- if (salinfo_platform_oemdata) {
- struct salinfo_platform_oemdata_parms parms = {
- .efi_guid = data->log_buffer + offset,
- .oemdata = &data->oemdata,
- .oemdata_size = &data->oemdata_size
- };
- count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
- &parms);
- } else
- data->oemdata_size = 0;
- } else
- return -EINVAL;
-
- return count;
-}
-
-static const struct proc_ops salinfo_data_proc_ops = {
- .proc_open = salinfo_log_open,
- .proc_release = salinfo_log_release,
- .proc_read = salinfo_log_read,
- .proc_write = salinfo_log_write,
- .proc_lseek = default_llseek,
-};
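-
-/*
- * A hypothetical userspace sketch of the command interface above; the
- * helper below is illustrative and not part of this file. "dir" would be
- * one of the /proc/sal/<type> directories created by salinfo_init() below:
- * block on "event" until a cpu number is reported, latch that cpu's record
- * with "read <cpu>", copy it out, then release it with "clear <cpu>".
- */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-static ssize_t salinfo_fetch(const char *dir, char *buf, size_t bufsize)
-{
- char path[256], event[16];
- ssize_t n;
- int fd, cpu;
-
- snprintf(path, sizeof(path), "%s/event", dir);
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return -1;
- n = read(fd, event, sizeof(event) - 1); /* blocks until a record is pending */
- close(fd);
- if (n <= 0)
- return -1;
- event[n] = '\0';
- cpu = atoi(event);
-
- snprintf(path, sizeof(path), "%s/data", dir);
- fd = open(path, O_RDWR);
- if (fd < 0)
- return -1;
- dprintf(fd, "read %d", cpu); /* salinfo_log_write() latches the record */
- n = pread(fd, buf, bufsize, 0); /* salinfo_log_read() copies it out */
- dprintf(fd, "clear %d", cpu); /* may make a follow-on record visible */
- close(fd);
- return n;
-}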
-
-static int salinfo_cpu_online(unsigned int cpu)
-{
- unsigned int i, end = ARRAY_SIZE(salinfo_data);
- struct salinfo_data *data;
-
- spin_lock_irq(&data_saved_lock);
- for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
- cpumask_set_cpu(cpu, &data->cpu_event);
- wake_up_interruptible(&data->read_wait);
- }
- spin_unlock_irq(&data_saved_lock);
- return 0;
-}
-
-static int salinfo_cpu_pre_down(unsigned int cpu)
-{
- unsigned int i, end = ARRAY_SIZE(salinfo_data);
- struct salinfo_data *data;
-
- spin_lock_irq(&data_saved_lock);
- for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
- struct salinfo_data_saved *data_saved;
- int j = ARRAY_SIZE(data->data_saved) - 1;
-
- for (data_saved = data->data_saved + j; j >= 0;
- --j, --data_saved) {
- if (data_saved->buffer && data_saved->cpu == cpu)
- shift1_data_saved(data, j);
- }
- cpumask_clear_cpu(cpu, &data->cpu_event);
- }
- spin_unlock_irq(&data_saved_lock);
- return 0;
-}
-
-/*
- * 'data' contains an integer that corresponds to the feature we're
- * testing
- */
-static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
-{
- unsigned long data = (unsigned long)v;
- seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
- return 0;
-}
-
-static int __init
-salinfo_init(void)
-{
- struct proc_dir_entry *salinfo_dir; /* /proc/sal dir entry */
- struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
- struct proc_dir_entry *dir, *entry;
- struct salinfo_data *data;
- int i;
-
- salinfo_dir = proc_mkdir("sal", NULL);
- if (!salinfo_dir)
- return 0;
-
- for (i = 0; i < NR_SALINFO_ENTRIES; i++) {
- /* pass the feature bit in question as misc data */
- *sdir++ = proc_create_single_data(salinfo_entries[i].name, 0,
- salinfo_dir, proc_salinfo_show,
- (void *)salinfo_entries[i].feature);
- }
-
- for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
- data = salinfo_data + i;
- data->type = i;
- init_waitqueue_head(&data->read_wait);
- dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
- if (!dir)
- continue;
-
- entry = proc_create_data("event", S_IRUSR, dir,
- &salinfo_event_proc_ops, data);
- if (!entry)
- continue;
- *sdir++ = entry;
-
- entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
- &salinfo_data_proc_ops, data);
- if (!entry)
- continue;
- *sdir++ = entry;
-
- *sdir++ = dir;
- }
-
- *sdir++ = salinfo_dir;
-
- timer_setup(&salinfo_timer, salinfo_timeout, 0);
- salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
- add_timer(&salinfo_timer);
-
- i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online",
- salinfo_cpu_online, salinfo_cpu_pre_down);
- WARN_ON(i < 0);
- return 0;
-}
-
-module_init(salinfo_init);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
deleted file mode 100644
index 5a55ac82c1..0000000000
--- a/arch/ia64/kernel/setup.c
+++ /dev/null
@@ -1,1081 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific setup.
- *
- * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, 2004 Intel Corp
- * Rohit Seth <rohit.seth@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Gordon Jin <gordon.jin@intel.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- *
- * 12/26/04 S.Siddha, G.Jin, R.Seth
- * Add multi-threading and multi-core detection
- * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
- * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
- * 03/31/00 R.Seth cpu_initialized and current->processor fixes
- * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
- * 02/01/00 R.Seth fixed get_cpuinfo for SMP
- * 01/07/99 S.Eranian added the support for command line argument
- * 06/24/99 W.Drummond added boot_cpu_data.
- * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pgtable.h>
-
-#include <linux/acpi.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/cpu.h>
-#include <linux/kdev_t.h>
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/reboot.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/clock.h>
-#include <linux/sched/task_stack.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/threads.h>
-#include <linux/screen_info.h>
-#include <linux/dmi.h>
-#include <linux/root_dev.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/efi.h>
-#include <linux/initrd.h>
-#include <linux/pm.h>
-#include <linux/cpufreq.h>
-#include <linux/kexec.h>
-#include <linux/crash_dump.h>
-
-#include <asm/mca.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/patch.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/smp.h>
-#include <asm/tlbflush.h>
-#include <asm/unistd.h>
-#include <asm/uv/uv.h>
-#include <asm/xtp.h>
-
-#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
-# error "struct cpuinfo_ia64 too big!"
-#endif
-
-char ia64_platform_name[64];
-
-#ifdef CONFIG_SMP
-unsigned long __per_cpu_offset[NR_CPUS];
-EXPORT_SYMBOL(__per_cpu_offset);
-#endif
-
-DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
-EXPORT_SYMBOL(ia64_cpu_info);
-DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_per_cpu_offset);
-#endif
-unsigned long ia64_cycles_per_usec;
-struct ia64_boot_param *ia64_boot_param;
-struct screen_info screen_info;
-unsigned long vga_console_iobase;
-unsigned long vga_console_membase;
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-static struct resource bss_resource = {
- .name = "Kernel bss",
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-unsigned long ia64_max_cacheline_size;
-
-unsigned long ia64_iobase; /* virtual address for I/O accesses */
-EXPORT_SYMBOL(ia64_iobase);
-struct io_space io_space[MAX_IO_SPACES];
-EXPORT_SYMBOL(io_space);
-unsigned int num_io_spaces;
-
-/*
- * "flush_icache_range()" needs to know what processor dependent stride size to use
- * when it makes i-cache(s) coherent with d-caches.
- */
-#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
-unsigned long ia64_i_cache_stride_shift = ~0;
-/*
- * "clflush_cache_range()" needs to know what processor dependent stride size to
- * use when it flushes cache lines including both d-cache and i-cache.
- */
-/* Safest way to go: 32 bytes by 32 bytes */
-#define CACHE_STRIDE_SHIFT 5
-unsigned long ia64_cache_stride_shift = ~0;
-
-/*
- * We use a special marker for the end of memory and it uses the extra (+1) slot
- */
-struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
-static int num_rsvd_regions __initdata;
-
-
-/*
- * Filter incoming memory segments based on the primitive map created from the boot
- * parameters. Segments contained in the map are removed from the memory ranges. A
- * caller-specified function is called with the memory ranges that remain after filtering.
- * This routine does not assume the incoming segments are sorted.
- */
-int __init
-filter_rsvd_memory (u64 start, u64 end, void *arg)
-{
- u64 range_start, range_end, prev_start;
- void (*func)(unsigned long, unsigned long, int);
- int i;
-
-#if IGNORE_PFN0
- if (start == PAGE_OFFSET) {
- printk(KERN_WARNING "warning: skipping physical page 0\n");
- start += PAGE_SIZE;
- if (start >= end) return 0;
- }
-#endif
- /*
- * lowest possible address (walker uses virtual)
- */
- prev_start = PAGE_OFFSET;
- func = arg;
-
- for (i = 0; i < num_rsvd_regions; ++i) {
- range_start = max(start, prev_start);
- range_end = min(end, rsvd_region[i].start);
-
- if (range_start < range_end)
- call_pernode_memory(__pa(range_start), range_end - range_start, func);
-
- /* nothing more available in this segment */
- if (range_end == end) return 0;
-
- prev_start = rsvd_region[i].end;
- }
- /* end of memory marker allows full processing inside loop body */
- return 0;
-}
-
-/*
- * Similar to "filter_rsvd_memory()", but the reserved memory ranges
- * are not filtered out.
- */
-int __init
-filter_memory(u64 start, u64 end, void *arg)
-{
- void (*func)(unsigned long, unsigned long, int);
-
-#if IGNORE_PFN0
- if (start == PAGE_OFFSET) {
- printk(KERN_WARNING "warning: skipping physical page 0\n");
- start += PAGE_SIZE;
- if (start >= end)
- return 0;
- }
-#endif
- func = arg;
- if (start < end)
- call_pernode_memory(__pa(start), end - start, func);
- return 0;
-}
-
-static void __init
-sort_regions (struct rsvd_region *rsvd_region, int max)
-{
- int j;
-
- /* simple bubble sorting */
- while (max--) {
- for (j = 0; j < max; ++j) {
- if (rsvd_region[j].start > rsvd_region[j+1].start) {
- swap(rsvd_region[j], rsvd_region[j + 1]);
- }
- }
- }
-}
-
-/* merge overlaps */
-static int __init
-merge_regions (struct rsvd_region *rsvd_region, int max)
-{
- int i;
- for (i = 1; i < max; ++i) {
- if (rsvd_region[i].start >= rsvd_region[i-1].end)
- continue;
- if (rsvd_region[i].end > rsvd_region[i-1].end)
- rsvd_region[i-1].end = rsvd_region[i].end;
- --max;
- memmove(&rsvd_region[i], &rsvd_region[i+1],
- (max - i) * sizeof(struct rsvd_region));
- /* recheck the entry just shifted into slot i against the merged region */
- --i;
- }
- return max;
-}
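-
-/*
- * Worked example of the two passes above, with hypothetical ranges:
- *
- *   input:  [0x200,0x300) [0x100,0x180) [0x280,0x380)
- *   sorted: [0x100,0x180) [0x200,0x300) [0x280,0x380)
- *   merged: [0x100,0x180) [0x200,0x380)        (merge_regions() returns 2)
- */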
-
-/*
- * Request address space for all standard resources
- */
-static int __init register_memory(void)
-{
- code_resource.start = ia64_tpa(_text);
- code_resource.end = ia64_tpa(_etext) - 1;
- data_resource.start = ia64_tpa(_etext);
- data_resource.end = ia64_tpa(_edata) - 1;
- bss_resource.start = ia64_tpa(__bss_start);
- bss_resource.end = ia64_tpa(_end) - 1;
- efi_initialize_iomem_resources(&code_resource, &data_resource,
- &bss_resource);
-
- return 0;
-}
-
-__initcall(register_memory);
-
-
-#ifdef CONFIG_KEXEC
-
-/*
- * This function checks if the reserved crashkernel is allowed on the specific
- * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
- * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
- * kernel/dma/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use it
- * in the kdump case. See the comment in sba_init() in sba_iommu.c.
- *
- * So, the only machvec that really supports loading the kdump kernel
- * over 4 GB is "uv".
- */
-static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
-{
- if (is_uv_system())
- return 1;
- else
- return pbase < (1UL << 32);
-}
-
-static void __init setup_crashkernel(unsigned long total, int *n)
-{
- unsigned long long base = 0, size = 0;
- int ret;
-
- ret = parse_crashkernel(boot_command_line, total,
- &size, &base);
- if (ret == 0 && size > 0) {
- if (!base) {
- sort_regions(rsvd_region, *n);
- *n = merge_regions(rsvd_region, *n);
- base = kdump_find_rsvd_region(size,
- rsvd_region, *n);
- }
-
- if (!check_crashkernel_memory(base, size)) {
- pr_warn("crashkernel: There would be kdump memory "
- "at %ld GB but this is unusable because it "
- "must be below 4 GB. Change the memory "
- "configuration of the machine.\n",
- (unsigned long)(base >> 30));
- return;
- }
-
- if (base != ~0UL) {
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(size >> 20),
- (unsigned long)(base >> 20),
- (unsigned long)(total >> 20));
- rsvd_region[*n].start =
- (unsigned long)__va(base);
- rsvd_region[*n].end =
- (unsigned long)__va(base + size);
- (*n)++;
- crashk_res.start = base;
- crashk_res.end = base + size - 1;
- }
- }
- efi_memmap_res.start = ia64_boot_param->efi_memmap;
- efi_memmap_res.end = efi_memmap_res.start +
- ia64_boot_param->efi_memmap_size;
- boot_param_res.start = __pa(ia64_boot_param);
- boot_param_res.end = boot_param_res.start +
- sizeof(*ia64_boot_param);
-}
-#else
-static inline void __init setup_crashkernel(unsigned long total, int *n)
-{}
-#endif
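-
-/*
- * Worked example of the parsing above (values are illustrative): booting
- * with "crashkernel=256M" makes parse_crashkernel() return size = 256 << 20
- * and base = 0, so the region is placed via kdump_find_rsvd_region();
- * "crashkernel=256M@2G" pins base to 2 GB and skips the search.
- */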
-
-#ifdef CONFIG_CRASH_DUMP
-static int __init reserve_elfcorehdr(u64 *start, u64 *end)
-{
- u64 length;
-
- /* We get the address using the kernel command line,
- * but the size is extracted from the EFI tables.
- * Both address and size are required for reservation
- * to work properly.
- */
-
- if (!is_vmcore_usable())
- return -EINVAL;
-
- if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
- vmcore_unusable();
- return -EINVAL;
- }
-
- *start = (unsigned long)__va(elfcorehdr_addr);
- *end = *start + length;
- return 0;
-}
-#endif /* CONFIG_CRASH_DUMP */
-
-/**
- * reserve_memory - setup reserved memory areas
- *
- * Setup the reserved memory areas set aside for the boot parameters,
- * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
- * see arch/ia64/include/asm/meminit.h if you need to define more.
- */
-void __init
-reserve_memory (void)
-{
- int n = 0;
- unsigned long total_memory;
-
- /*
- * none of the entries in this table overlap
- */
- rsvd_region[n].start = (unsigned long) ia64_boot_param;
- rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
- n++;
-
- rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
- rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
- n++;
-
- rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
- rsvd_region[n].end = (rsvd_region[n].start
- + strlen(__va(ia64_boot_param->command_line)) + 1);
- n++;
-
- rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
- rsvd_region[n].end = (unsigned long) ia64_imva(_end);
- n++;
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (ia64_boot_param->initrd_start) {
- rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
- rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
- n++;
- }
-#endif
-
-#ifdef CONFIG_CRASH_DUMP
- if (reserve_elfcorehdr(&rsvd_region[n].start,
- &rsvd_region[n].end) == 0)
- n++;
-#endif
-
- total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
- n++;
-
- setup_crashkernel(total_memory, &n);
-
- /* end of memory marker */
- rsvd_region[n].start = ~0UL;
- rsvd_region[n].end = ~0UL;
- n++;
-
- num_rsvd_regions = n;
- BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
-
- sort_regions(rsvd_region, num_rsvd_regions);
- num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
-
- /* reserve all regions except the end of memory marker with memblock */
- for (n = 0; n < num_rsvd_regions - 1; n++) {
- struct rsvd_region *region = &rsvd_region[n];
- phys_addr_t addr = __pa(region->start);
- phys_addr_t size = region->end - region->start;
-
- memblock_reserve(addr, size);
- }
-}
-
-/**
- * find_initrd - get initrd parameters from the boot parameter structure
- *
- * Grab the initrd start and end from the boot parameter struct given us by
- * the boot loader.
- */
-void __init
-find_initrd (void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (ia64_boot_param->initrd_start) {
- initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
- initrd_end = initrd_start+ia64_boot_param->initrd_size;
-
- printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
- initrd_start, ia64_boot_param->initrd_size);
- }
-#endif
-}
-
-static void __init
-io_port_init (void)
-{
- unsigned long phys_iobase;
-
- /*
- * Set `iobase' based on the EFI memory map or, failing that, the
- * value firmware left in ar.k0.
- *
- * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
- * the port's virtual address, so ia32_load_state() loads it with a
- * user virtual address. But in ia64 mode, glibc uses the
- * *physical* address in ar.k0 to mmap the appropriate area from
- * /dev/mem, and the inX()/outX() interfaces use MMIO. In both
- * cases, user-mode can only use the legacy 0-64K I/O port space.
- *
- * ar.k0 is not involved in kernel I/O port accesses, which can use
- * any of the I/O port spaces and are done via MMIO using the
- * virtual mmio_base from the appropriate io_space[].
- */
- phys_iobase = efi_get_iobase();
- if (!phys_iobase) {
- phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
- printk(KERN_INFO "No I/O port range found in EFI memory map, "
- "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
- }
- ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
- ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-
- /* setup legacy IO port space */
- io_space[0].mmio_base = ia64_iobase;
- io_space[0].sparse = 1;
- num_io_spaces = 1;
-}
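-
-/*
- * With ->sparse set, kernel port accesses are translated by the sparse
- * encoding in asm/io.h (IO_SPACE_SPARSE_ENCODING), which puts each 4-byte
- * port granule on its own 4K page:
- *
- *   offset = ((port >> 2) << 12) | (port & 0xfff)
- *
- * e.g. the legacy UART at port 0x3f8 ends up at mmio_base + 0xfe3f8.
- */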
-
-/**
- * early_console_setup - setup debugging console
- *
- * Consoles started here require little enough setup that we can start using
- * them very early in the boot process, either right after the machine
- * vector initialization, or even before if the drivers can detect their hw.
- *
- * Returns non-zero if a console couldn't be setup.
- */
-static inline int __init
-early_console_setup (char *cmdline)
-{
-#ifdef CONFIG_EFI_PCDP
- if (!efi_setup_pcdp_console(cmdline))
- return 0;
-#endif
- return -1;
-}
-
-static void __init
-screen_info_setup(void)
-{
- unsigned int orig_x, orig_y, num_cols, num_rows, font_height;
-
- memset(&screen_info, 0, sizeof(screen_info));
-
- if (!ia64_boot_param->console_info.num_rows ||
- !ia64_boot_param->console_info.num_cols) {
- printk(KERN_WARNING "invalid screen-info, guessing 80x25\n");
- orig_x = 0;
- orig_y = 0;
- num_cols = 80;
- num_rows = 25;
- font_height = 16;
- } else {
- orig_x = ia64_boot_param->console_info.orig_x;
- orig_y = ia64_boot_param->console_info.orig_y;
- num_cols = ia64_boot_param->console_info.num_cols;
- num_rows = ia64_boot_param->console_info.num_rows;
- font_height = 400 / num_rows;
- }
-
- screen_info.orig_x = orig_x;
- screen_info.orig_y = orig_y;
- screen_info.orig_video_cols = num_cols;
- screen_info.orig_video_lines = num_rows;
- screen_info.orig_video_points = font_height;
- screen_info.orig_video_mode = 3; /* XXX fake */
- screen_info.orig_video_isVGA = 1; /* XXX fake */
- screen_info.orig_video_ega_bx = 3; /* XXX fake */
-}
-
-static inline void
-mark_bsp_online (void)
-{
-#ifdef CONFIG_SMP
- /* If we register an early console, allow CPU 0 to printk */
- set_cpu_online(smp_processor_id(), true);
-#endif
-}
-
-static __initdata int nomca;
-static __init int setup_nomca(char *s)
-{
- nomca = 1;
- return 0;
-}
-early_param("nomca", setup_nomca);
-
-void __init
-setup_arch (char **cmdline_p)
-{
- unw_init();
-
- ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
-
- *cmdline_p = __va(ia64_boot_param->command_line);
- strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
-
- efi_init();
- io_port_init();
-
- uv_probe_system_type();
- parse_early_param();
-
- if (early_console_setup(*cmdline_p) == 0)
- mark_bsp_online();
-
- /* Initialize the ACPI boot-time table parser */
- acpi_table_init();
- early_acpi_boot_init();
-#ifdef CONFIG_ACPI_NUMA
- acpi_numa_init();
- acpi_numa_fixup();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
- prefill_possible_map();
-#endif
- per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ?
- 32 : cpumask_weight(&early_cpu_possible_map)),
- additional_cpus > 0 ? additional_cpus : 0);
-#endif /* CONFIG_ACPI_NUMA */
-
-#ifdef CONFIG_SMP
- smp_build_cpu_map();
-#endif
- find_memory();
-
- /* process SAL system table: */
- ia64_sal_init(__va(sal_systab_phys));
-
-#ifdef CONFIG_ITANIUM
- ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
-#else
- {
- unsigned long num_phys_stacked;
-
- if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
- ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
- }
-#endif
-
-#ifdef CONFIG_SMP
- cpu_physical_id(0) = hard_smp_processor_id();
-#endif
-
- cpu_init(); /* initialize the bootstrap CPU */
- mmu_context_init(); /* initialize context_id bitmap */
-
-#ifdef CONFIG_VT
- if (!conswitchp) {
-# if defined(CONFIG_VGA_CONSOLE)
- /*
- * Non-legacy systems may route legacy VGA MMIO range to system
- * memory. vga_con probes the MMIO hole, so memory looks like
- * a VGA device to it. The EFI memory map can tell us if it's
- * memory so we can avoid this problem.
- */
- if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
- conswitchp = &vga_con;
-# endif
- }
-#endif
-
- /* enable IA-64 Machine Check Abort Handling unless disabled */
- if (!nomca)
- ia64_mca_init();
-
- /*
- * Default to /dev/sda2. This assumes that the EFI partition
- * is physical disk 1 partition 1 and the Linux root disk is
- * physical disk 1 partition 2.
- */
- ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2);
-
- if (is_uv_system())
- uv_setup(cmdline_p);
-#ifdef CONFIG_SMP
- else
- init_smp_config();
-#endif
-
- screen_info_setup();
- paging_init();
-
- clear_sched_clock_stable();
-}
-
-/*
- * Display cpu info for all CPUs.
- */
-static int
-show_cpuinfo (struct seq_file *m, void *v)
-{
-#ifdef CONFIG_SMP
-# define lpj c->loops_per_jiffy
-# define cpunum c->cpu
-#else
-# define lpj loops_per_jiffy
-# define cpunum 0
-#endif
- static struct {
- unsigned long mask;
- const char *feature_name;
- } feature_bits[] = {
- { 1UL << 0, "branchlong" },
- { 1UL << 1, "spontaneous deferral"},
- { 1UL << 2, "16-byte atomic ops" }
- };
- char features[128], *cp, *sep;
- struct cpuinfo_ia64 *c = v;
- unsigned long mask;
- unsigned long proc_freq;
- int i, size;
-
- mask = c->features;
-
- /* build the feature string: */
- memcpy(features, "standard", 9);
- cp = features;
- size = sizeof(features);
- sep = "";
- for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
- if (mask & feature_bits[i].mask) {
- cp += snprintf(cp, size, "%s%s", sep,
- feature_bits[i].feature_name);
- sep = ", ";
- mask &= ~feature_bits[i].mask;
- size = sizeof(features) - (cp - features);
- }
- }
- if (mask && size > 1) {
- /* print unknown features as a hex value */
- snprintf(cp, size, "%s0x%lx", sep, mask);
- }
-
- proc_freq = cpufreq_quick_get(cpunum);
- if (!proc_freq)
- proc_freq = c->proc_freq / 1000;
-
- seq_printf(m,
- "processor : %d\n"
- "vendor : %s\n"
- "arch : IA-64\n"
- "family : %u\n"
- "model : %u\n"
- "model name : %s\n"
- "revision : %u\n"
- "archrev : %u\n"
- "features : %s\n"
- "cpu number : %lu\n"
- "cpu regs : %u\n"
- "cpu MHz : %lu.%03lu\n"
- "itc MHz : %lu.%06lu\n"
- "BogoMIPS : %lu.%02lu\n",
- cpunum, c->vendor, c->family, c->model,
- c->model_name, c->revision, c->archrev,
- features, c->ppn, c->number,
- proc_freq / 1000, proc_freq % 1000,
- c->itc_freq / 1000000, c->itc_freq % 1000000,
- lpj*HZ/500000, (lpj*HZ/5000) % 100);
-#ifdef CONFIG_SMP
- seq_printf(m, "siblings : %u\n",
- cpumask_weight(&cpu_core_map[cpunum]));
- if (c->socket_id != -1)
- seq_printf(m, "physical id: %u\n", c->socket_id);
- if (c->threads_per_core > 1 || c->cores_per_socket > 1)
- seq_printf(m,
- "core id : %u\n"
- "thread id : %u\n",
- c->core_id, c->thread_id);
-#endif
- seq_printf(m, "\n");
-
- return 0;
-}
-
-static void *
-c_start (struct seq_file *m, loff_t *pos)
-{
-#ifdef CONFIG_SMP
- while (*pos < nr_cpu_ids && !cpu_online(*pos))
- ++*pos;
-#endif
- return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
-}
-
-static void *
-c_next (struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-
-static void
-c_stop (struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo
-};
-
-#define MAX_BRANDS 8
-static char brandname[MAX_BRANDS][128];
-
-static char *
-get_model_name(__u8 family, __u8 model)
-{
- static int overflow;
- char brand[128];
- int i;
-
- memcpy(brand, "Unknown", 8);
- if (ia64_pal_get_brand_info(brand)) {
- if (family == 0x7)
- memcpy(brand, "Merced", 7);
- else if (family == 0x1f) switch (model) {
- case 0: memcpy(brand, "McKinley", 9); break;
- case 1: memcpy(brand, "Madison", 8); break;
- case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
- }
- }
- for (i = 0; i < MAX_BRANDS; i++)
- if (strcmp(brandname[i], brand) == 0)
- return brandname[i];
- for (i = 0; i < MAX_BRANDS; i++)
- if (brandname[i][0] == '\0')
- return strcpy(brandname[i], brand);
- if (overflow++ == 0)
- printk(KERN_ERR
- "%s: Table overflow. Some processor model information will be missing\n",
- __func__);
- return "Unknown";
-}
-
-static void
-identify_cpu (struct cpuinfo_ia64 *c)
-{
- union {
- unsigned long bits[5];
- struct {
- /* id 0 & 1: */
- char vendor[16];
-
- /* id 2 */
- u64 ppn; /* processor serial number */
-
- /* id 3: */
- unsigned number : 8;
- unsigned revision : 8;
- unsigned model : 8;
- unsigned family : 8;
- unsigned archrev : 8;
- unsigned reserved : 24;
-
- /* id 4: */
- u64 features;
- } field;
- } cpuid;
- pal_vm_info_1_u_t vm1;
- pal_vm_info_2_u_t vm2;
- pal_status_t status;
- unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
- int i;
- for (i = 0; i < 5; ++i)
- cpuid.bits[i] = ia64_get_cpuid(i);
-
- memcpy(c->vendor, cpuid.field.vendor, 16);
-#ifdef CONFIG_SMP
- c->cpu = smp_processor_id();
-
- /* below default values will be overwritten by identify_siblings()
- * for Multi-Threading/Multi-Core capable CPUs
- */
- c->threads_per_core = c->cores_per_socket = c->num_log = 1;
- c->socket_id = -1;
-
- identify_siblings(c);
-
- if (c->threads_per_core > smp_num_siblings)
- smp_num_siblings = c->threads_per_core;
-#endif
- c->ppn = cpuid.field.ppn;
- c->number = cpuid.field.number;
- c->revision = cpuid.field.revision;
- c->model = cpuid.field.model;
- c->family = cpuid.field.family;
- c->archrev = cpuid.field.archrev;
- c->features = cpuid.field.features;
- c->model_name = get_model_name(c->family, c->model);
-
- status = ia64_pal_vm_summary(&vm1, &vm2);
- if (status == PAL_STATUS_SUCCESS) {
- impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
- phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
- }
- c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
- c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
-}
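-
-/*
- * Worked example of the mask computation above: with the Itanium defaults
- * (impl_va_msb = 50, phys_addr_size = 44),
- *
- *   unimpl_va_mask = ~((7UL << 61) | ((1UL << 51) - 1))   -> bits 51..60
- *   unimpl_pa_mask = ~((1UL << 63) | ((1UL << 44) - 1))   -> bits 44..62
- *
- * i.e. exactly the hole between the implemented virtual-address bits and
- * the region bits 61..63, and between the implemented physical-address
- * bits and the attribute bit 63.
- */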
-
-/*
- * Do the following calculations:
- *
- * 1. the max. cache line size.
- * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
- * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
- */
-static void
-get_cache_info(void)
-{
- unsigned long line_size, max = 1;
- unsigned long l, levels, unique_caches;
- pal_cache_config_info_t cci;
- long status;
-
- status = ia64_pal_cache_summary(&levels, &unique_caches);
- if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
- __func__, status);
- max = SMP_CACHE_BYTES;
- /* Safest setup for "flush_icache_range()" */
- ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
- /* Safest setup for "clflush_cache_range()" */
- ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
- goto out;
- }
-
- for (l = 0; l < levels; ++l) {
- /* cache_type (data_or_unified)=2 */
- status = ia64_pal_cache_config_info(l, 2, &cci);
- if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_config_info"
- "(l=%lu, 2) failed (status=%ld)\n",
- __func__, l, status);
- max = SMP_CACHE_BYTES;
- /* The safest setup for "flush_icache_range()" */
- cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
- /* The safest setup for "clflush_cache_range()" */
- ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
- cci.pcci_unified = 1;
- } else {
- if (cci.pcci_stride < ia64_cache_stride_shift)
- ia64_cache_stride_shift = cci.pcci_stride;
-
- line_size = 1 << cci.pcci_line_size;
- if (line_size > max)
- max = line_size;
- }
-
- if (!cci.pcci_unified) {
- /* cache_type (instruction)=1*/
- status = ia64_pal_cache_config_info(l, 1, &cci);
- if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_config_info"
- "(l=%lu, 1) failed (status=%ld)\n",
- __func__, l, status);
- /* The safest setup for flush_icache_range() */
- cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
- }
- }
- if (cci.pcci_stride < ia64_i_cache_stride_shift)
- ia64_i_cache_stride_shift = cci.pcci_stride;
- }
- out:
- if (max > ia64_max_cacheline_size)
- ia64_max_cacheline_size = max;
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. This function acts
- * as a 'CPU state barrier': nothing should get across it.
- */
-void
-cpu_init (void)
-{
- extern void ia64_mmu_init(void *);
- static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
- unsigned long num_phys_stacked;
- pal_vm_info_2_u_t vmi;
- unsigned int max_ctx;
- struct cpuinfo_ia64 *cpu_info;
- void *cpu_data;
-
- cpu_data = per_cpu_init();
-#ifdef CONFIG_SMP
- /*
- * insert boot cpu into sibling and core maps
- * (must be done after the per_cpu area is set up)
- */
- if (smp_processor_id() == 0) {
- cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
- cpumask_set_cpu(0, &cpu_core_map[0]);
- } else {
- /*
- * Set ar.k3 so that assembly code in MCA handler can compute
- * physical addresses of per cpu variables with a simple:
- * phys = ar.k3 + &per_cpu_var
- * and the alt-dtlb-miss handler can set per-cpu mapping into
- * the TLB when needed. head.S already did this for cpu0.
- */
- ia64_set_kr(IA64_KR_PER_CPU_DATA,
- ia64_tpa(cpu_data) - (long) __per_cpu_start);
- }
-#endif
-
- get_cache_info();
-
- /*
- * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
- * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
- * depends on the data returned by identify_cpu(). We break the dependency by
- * accessing cpu_data() through the canonical per-CPU address.
- */
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
- identify_cpu(cpu_info);
-
-#ifdef CONFIG_MCKINLEY
- {
-# define FEATURE_SET 16
- struct ia64_pal_retval iprv;
-
- if (cpu_info->family == 0x1f) {
- PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
- if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
- PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
- (iprv.v1 | 0x80), FEATURE_SET, 0);
- }
- }
-#endif
-
- /* Clear the stack memory reserved for pt_regs: */
- memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
-
- ia64_set_kr(IA64_KR_FPU_OWNER, 0);
-
- /*
- * Initialize the page-table base register to a global
- * directory with all zeroes. This ensures that we can handle
- * TLB-misses to user address-space even before we have created the
- * first user address-space. This may happen, e.g., due to
- * aggressive use of lfetch.fault.
- */
- ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
-
- /*
- * Initialize default control register to defer speculative faults except
- * for those arising from TLB misses, which are not deferred. The
- * kernel MUST NOT depend on a particular setting of these bits (in other words,
- * the kernel must have recovery code for all speculative accesses). Turn on
- * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
- * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
- * be fine).
- */
- ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
- | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
- mmgrab(&init_mm);
- current->active_mm = &init_mm;
- BUG_ON(current->mm);
-
- ia64_mmu_init(ia64_imva(cpu_data));
- ia64_mca_cpu_init(ia64_imva(cpu_data));
-
- /* Clear ITC to eliminate sched_clock() overflows in human time. */
- ia64_set_itc(0);
-
- /* disable all local interrupt sources: */
- ia64_set_itv(1 << 16);
- ia64_set_lrr0(1 << 16);
- ia64_set_lrr1(1 << 16);
- ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
- ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
-
- /* clear TPR & XTP to enable all interrupt classes: */
- ia64_setreg(_IA64_REG_CR_TPR, 0);
-
- /* Clear any pending interrupts left by SAL/EFI */
- while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
- ia64_eoi();
-
-#ifdef CONFIG_SMP
- normal_xtp();
-#endif
-
- /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
- if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
- max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
- setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
- } else {
- printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
- max_ctx = (1U << 15) - 1; /* use architected minimum */
- }
- while (max_ctx < ia64_ctx.max_ctx) {
- unsigned int old = ia64_ctx.max_ctx;
- if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
- break;
- }
-
- if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
- printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
- "stacked regs\n");
- num_phys_stacked = 96;
- }
- /* size of physical stacked register partition plus 8 bytes: */
- if (num_phys_stacked > max_num_phys_stacked) {
- ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
- max_num_phys_stacked = num_phys_stacked;
- }
-}
-
-void __init arch_cpu_finalize_init(void)
-{
- ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
- (unsigned long) __end___mckinley_e9_bundles);
-}
-
-static int __init run_dmi_scan(void)
-{
- dmi_setup();
- return 0;
-}
-core_initcall(run_dmi_scan);
diff --git a/arch/ia64/kernel/sigframe.h b/arch/ia64/kernel/sigframe.h
deleted file mode 100644
index 58a36ce6c2..0000000000
--- a/arch/ia64/kernel/sigframe.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-struct sigscratch {
- unsigned long scratch_unat; /* ar.unat for the general registers saved in pt */
- unsigned long ar_pfs; /* for syscalls, the user-level function-state */
- struct pt_regs pt;
-};
-
-struct sigframe {
- /*
- * Place signal handler args where user-level unwinder can find them easily.
- * DO NOT MOVE THESE. They are part of the IA-64 Linux ABI and there is
- * user-level code that depends on their presence!
- */
- unsigned long arg0; /* signum */
- unsigned long arg1; /* siginfo pointer */
- unsigned long arg2; /* sigcontext pointer */
- /*
- * End of architected state.
- */
-
- void __user *handler; /* pointer to the plabel of the signal handler */
- struct siginfo info;
- struct sigcontext sc;
-};
-
-extern void ia64_do_signal (struct sigscratch *, long);
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
deleted file mode 100644
index 51cf6a7ec1..0000000000
--- a/arch/ia64/kernel/signal.c
+++ /dev/null
@@ -1,412 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific signal handling support.
- *
- * Copyright (C) 1999-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Derived from i386 and Alpha versions.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/ptrace.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/unistd.h>
-#include <linux/wait.h>
-
-#include <asm/intrinsics.h>
-#include <linux/uaccess.h>
-#include <asm/rse.h>
-#include <asm/sigcontext.h>
-
-#include "sigframe.h"
-
-#define DEBUG_SIG 0
-#define STACK_ALIGN 16 /* minimal alignment for stack pointer */
-
-#if _NSIG_WORDS > 1
-# define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
-# define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t))
-#else
-# define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0])
-# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
-#endif
-
-static long
-restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
-{
- unsigned long ip, flags, nat, um, cfm, rsc;
- long err;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- /* restore the scratch state that always gets updated during signal delivery: */
- err = __get_user(flags, &sc->sc_flags);
- err |= __get_user(nat, &sc->sc_nat);
- err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
- err |= __get_user(cfm, &sc->sc_cfm);
- err |= __get_user(um, &sc->sc_um); /* user mask */
- err |= __get_user(rsc, &sc->sc_ar_rsc);
- err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
- err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
- err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
- err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
- err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
- err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
- err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */
- err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
- err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
- err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
-
- scr->pt.cr_ifs = cfm | (1UL << 63);
- scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
-
- /* establish new instruction pointer: */
- scr->pt.cr_iip = ip & ~0x3UL;
- ia64_psr(&scr->pt)->ri = ip & 0x3;
- scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);
-
- scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);
-
- if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
- /* Restore most scratch-state only when not in syscall. */
- err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
- err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
- err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
- err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
- err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
- err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
- }
-
- if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
- struct ia64_psr *psr = ia64_psr(&scr->pt);
-
- err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
- psr->mfh = 0; /* drop signal handler's fph contents... */
- preempt_disable();
- if (psr->dfh)
- ia64_drop_fpu(current);
- else {
- /* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
- __ia64_load_fpu(current->thread.fph);
- ia64_set_local_fpu_owner(current);
- }
- preempt_enable();
- }
- return err;
-}
-
-long
-ia64_rt_sigreturn (struct sigscratch *scr)
-{
- extern char ia64_strace_leave_kernel, ia64_leave_kernel;
- struct sigcontext __user *sc;
- sigset_t set;
- long retval;
-
- sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;
-
- /*
- * When we return to the previously executing context, r8 and r10 have already
- * been set up the way we want them. Indeed, if the signal wasn't delivered while
- * in a system call, we must not touch r8 or r10 as otherwise user-level state
- * could be corrupted.
- */
- retval = (long) &ia64_leave_kernel;
- if (test_thread_flag(TIF_SYSCALL_TRACE)
- || test_thread_flag(TIF_SYSCALL_AUDIT))
- /*
- * strace expects to be notified after sigreturn returns even though the
- * context to which we return may not be in the middle of a syscall.
- * Thus, the return-value that strace displays for sigreturn is
- * meaningless.
- */
- retval = (long) &ia64_strace_leave_kernel;
-
- if (!access_ok(sc, sizeof(*sc)))
- goto give_sigsegv;
-
- if (GET_SIGSET(&set, &sc->sc_mask))
- goto give_sigsegv;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(sc, scr))
- goto give_sigsegv;
-
-#if DEBUG_SIG
- printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
- current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
-#endif
- if (restore_altstack(&sc->sc_stack))
- goto give_sigsegv;
- return retval;
-
- give_sigsegv:
- force_sig(SIGSEGV);
- return retval;
-}
-
-/*
- * This does just the minimum required setup of sigcontext.
- * Specifically, it only installs data that is either not knowable at
- * the user-level or that gets modified before execution in the
- * trampoline starts. Everything else is done at the user-level.
- */
-static long
-setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
-{
- unsigned long flags = 0, ifs, cfm, nat;
- long err = 0;
-
- ifs = scr->pt.cr_ifs;
-
- if (on_sig_stack((unsigned long) sc))
- flags |= IA64_SC_FLAG_ONSTACK;
- if ((ifs & (1UL << 63)) == 0)
- /* if cr_ifs doesn't have the valid bit set, we got here through a syscall */
- flags |= IA64_SC_FLAG_IN_SYSCALL;
- cfm = ifs & ((1UL << 38) - 1);
- ia64_flush_fph(current);
- if ((current->thread.flags & IA64_THREAD_FPH_VALID)) {
- flags |= IA64_SC_FLAG_FPH_VALID;
- err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
- }
-
- nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);
-
- err |= __put_user(flags, &sc->sc_flags);
- err |= __put_user(nat, &sc->sc_nat);
- err |= PUT_SIGSET(mask, &sc->sc_mask);
- err |= __put_user(cfm, &sc->sc_cfm);
- err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
- err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
- err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */
- err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
- err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
- err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
- err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
- err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
- err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */
- err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
- err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */
- err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
- err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
-
- if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
- /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
- err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
- err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
- err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
- err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */
- err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
- err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
- }
- return err;
-}
-
-/*
- * Check whether the register-backing store is already on the signal stack.
- * The unsigned subtraction makes this a single-compare range check: it is
- * true exactly when sas_ss_sp <= bsp < sas_ss_sp + sas_ss_size.
- */
-static inline int
-rbs_on_sig_stack (unsigned long bsp)
-{
- return (bsp - current->sas_ss_sp < current->sas_ss_size);
-}
-
-static long
-setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
-{
- extern char __kernel_sigtramp[];
- unsigned long tramp_addr, new_rbs = 0, new_sp;
- struct sigframe __user *frame;
- long err;
-
- new_sp = scr->pt.r12;
- tramp_addr = (unsigned long) __kernel_sigtramp;
- if (ksig->ka.sa.sa_flags & SA_ONSTACK) {
- int onstack = sas_ss_flags(new_sp);
-
- if (onstack == 0) {
- new_sp = current->sas_ss_sp + current->sas_ss_size;
- /*
- * We need to check for the register stack being on the
- * signal stack separately, because it's switched
- * separately (memory stack is switched in the kernel,
- * register stack is switched in the signal trampoline).
- */
- if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
- new_rbs = ALIGN(current->sas_ss_sp,
- sizeof(long));
- } else if (onstack == SS_ONSTACK) {
- unsigned long check_sp;
-
- /*
- * If we are on the alternate signal stack and would
- * overflow it, don't. Return an always-bogus address
- * instead so we will die with SIGSEGV.
- */
- check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
- if (!likely(on_sig_stack(check_sp))) {
- force_sigsegv(ksig->sig);
- return 1;
- }
- }
- }
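- /* carve the frame just below new_sp; "& -STACK_ALIGN" rounds down to a 16-byte boundary */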
- frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
-
- if (!access_ok(frame, sizeof(*frame))) {
- force_sigsegv(ksig->sig);
- return 1;
- }
-
- err = __put_user(ksig->sig, &frame->arg0);
- err |= __put_user(&frame->info, &frame->arg1);
- err |= __put_user(&frame->sc, &frame->arg2);
- err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
- err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */
- err |= __put_user(ksig->ka.sa.sa_handler, &frame->handler);
-
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-
- err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);
- err |= setup_sigcontext(&frame->sc, set, scr);
-
- if (unlikely(err)) {
- force_sigsegv(ksig->sig);
- return 1;
- }
-
- scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
- scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
- scr->pt.cr_iip = tramp_addr;
- ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */
- ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */
- /*
- * Force the interruption function mask to zero. This has no effect when a
- * system-call got interrupted by a signal (since, in that case, scr->pt.cr_ifs is
- * ignored), but it has the desirable effect of making it possible to deliver a
- * signal with an incomplete register frame (which happens when a mandatory RSE
- * load faults). Furthermore, it has no negative effect on getting the user's
- * dirty partition preserved, because that's governed by scr->pt.loadrs.
- */
- scr->pt.cr_ifs = (1UL << 63);
-
- /*
- * Note: this affects only the NaT bits of the scratch regs (the ones saved in
- * pt_regs), which is exactly what we want.
- */
- scr->scratch_unat = 0; /* ensure the NaT bit of r12 is clear */
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
- current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
-#endif
- return 0;
-}
-
-static long
-handle_signal (struct ksignal *ksig, struct sigscratch *scr)
-{
- int ret = setup_frame(ksig, sigmask_to_save(), scr);
-
- if (!ret)
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
-
- return ret;
-}
-
-/*
- * Note that `init' is a special process: it doesn't get signals it doesn't want to
- * handle. Thus you cannot kill init with a SIGKILL, even by mistake.
- */
-void
-ia64_do_signal (struct sigscratch *scr, long in_syscall)
-{
- long restart = in_syscall;
- long errno = scr->pt.r8;
- struct ksignal ksig;
-
- /*
- * This only loops in the rare cases of handle_signal() failing, in which case we
- * need to push through a forced SIGSEGV.
- */
- while (1) {
- if (!get_signal(&ksig))
- break;
-
- /*
- * get_signal() may have run a debugger (via notify_parent())
- * and the debugger may have modified the state (e.g., to arrange for an
- * inferior call), thus it's important to check for restarting _after_
- * get_signal().
- */
- if ((long) scr->pt.r10 != -1)
- /*
- * A system call has to be restarted only if one of the error codes
- * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
- * isn't -1 then r8 doesn't hold an error code and we don't need to
- * restart the syscall, so we can clear the "restart" flag here.
- */
- restart = 0;
-
- if (ksig.sig <= 0)
- break;
-
- if (unlikely(restart)) {
- switch (errno) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
- scr->pt.r8 = EINTR;
- /* note: scr->pt.r10 is already -1 */
- break;
- case ERESTARTSYS:
- if ((ksig.ka.sa.sa_flags & SA_RESTART) == 0) {
- scr->pt.r8 = EINTR;
- /* note: scr->pt.r10 is already -1 */
- break;
- }
- fallthrough;
- case ERESTARTNOINTR:
- ia64_decrement_ip(&scr->pt);
- restart = 0; /* don't restart twice if handle_signal() fails... */
- }
- }
-
- /*
- * Whee! Actually deliver the signal. If the delivery failed, we need to
- * continue to iterate in this loop so we can deliver the SIGSEGV...
- */
- if (handle_signal(&ksig, scr))
- return;
- }
-
- /* Did we come from a system call? */
- if (restart) {
- /* Restart the system call - no handlers present */
- if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
- || errno == ERESTART_RESTARTBLOCK)
- {
- /*
- * Note: the syscall number is in r15 which is saved in
- * pt_regs so all we need to do here is adjust ip so that
- * the "break" instruction gets re-executed.
- */
- ia64_decrement_ip(&scr->pt);
- if (errno == ERESTART_RESTARTBLOCK)
- scr->pt.r15 = __NR_restart_syscall;
- }
- }
-
- /* if there's no signal to deliver, we just put the saved sigmask
- * back */
- restore_saved_sigmask();
-}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
deleted file mode 100644
index ea4f009a23..0000000000
--- a/arch/ia64/kernel/smp.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SMP Support
- *
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Lots of stuff stolen from arch/alpha/kernel/smp.c
- *
- * 01/05/16 Rohit Seth <rohit.seth@intel.com> IA64-SMP functions. Reorganized
- * the existing code (on the lines of x86 port).
- * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
- * calibration on each CPU.
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
- * 00/03/31 Rohit Seth <rohit.seth@intel.com> Fixes for Bootstrap Processor
- * & cpu_online_map now gets done here (instead of setup.c)
- * 99/10/05 davidm Update to bring it in sync with new command-line processing
- * scheme.
- * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
- * smp_call_function_single to resend IPI on timeouts
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/cache.h>
-#include <linux/delay.h>
-#include <linux/efi.h>
-#include <linux/bitops.h>
-#include <linux/kexec.h>
-
-#include <linux/atomic.h>
-#include <asm/current.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/tlbflush.h>
-#include <asm/unistd.h>
-#include <asm/mca.h>
-#include <asm/xtp.h>
-
-/*
- * Note: alignment of 4 entries/cacheline was empirically determined
- * to be a good tradeoff between hot cachelines & spreading the array
- * across too many cachelines.
- */
-static struct local_tlb_flush_counts {
- unsigned int count;
-} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
- shadow_flush_counts);
-
-#define IPI_CALL_FUNC 0
-#define IPI_CPU_STOP 1
-#define IPI_CALL_FUNC_SINGLE 2
-#define IPI_KDUMP_CPU_STOP 3
-
-/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);
-
-extern void cpu_halt (void);
-
-static void
-stop_this_cpu(void)
-{
- /*
- * Remove this CPU:
- */
- set_cpu_online(smp_processor_id(), false);
- max_xtp();
- local_irq_disable();
- cpu_halt();
-}
-
-void
-cpu_die(void)
-{
- max_xtp();
- local_irq_disable();
- cpu_halt();
- /* Should never be here */
- BUG();
- for (;;);
-}
-
-irqreturn_t
-handle_IPI (int irq, void *dev_id)
-{
- int this_cpu = get_cpu();
- unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
- unsigned long ops;
-
- mb(); /* Order interrupt and bit testing. */
- while ((ops = xchg(pending_ipis, 0)) != 0) {
- mb(); /* Order bit clearing and data access. */
- do {
- unsigned long which;
-
- which = ffz(~ops);
- ops &= ~(1 << which);
-
- switch (which) {
- case IPI_CPU_STOP:
- stop_this_cpu();
- break;
- case IPI_CALL_FUNC:
- generic_smp_call_function_interrupt();
- break;
- case IPI_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
-#ifdef CONFIG_KEXEC
- case IPI_KDUMP_CPU_STOP:
- unw_init_running(kdump_cpu_freeze, NULL);
- break;
-#endif
- default:
- printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
- this_cpu, which);
- break;
- }
- } while (ops);
- mb(); /* Order data access and bit testing. */
- }
- put_cpu();
- return IRQ_HANDLED;
-}
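-
-/*
- * The sender/receiver protocol used above, in isolation: a hypothetical,
- * portable C11 sketch (not kernel code) where atomic_fetch_or() stands in
- * for set_bit(), atomic_exchange() for xchg(), and the GCC builtin
- * __builtin_ctzl() for ffz(~ops). Every op bit set before the exchange is
- * drained exactly once, so no IPI work is lost or handled twice.
- */
-#include <stdatomic.h>
-
-static _Atomic unsigned long pending_ops;
-
-static void example_send(int op)
-{
- atomic_fetch_or(&pending_ops, 1UL << op);
- /* ...then raise the actual interrupt, cf. send_IPI_single() below */
-}
-
-static void example_handle(void (*handlers[])(void))
-{
- unsigned long ops;
-
- while ((ops = atomic_exchange(&pending_ops, 0)) != 0) {
- do {
- int which = __builtin_ctzl(ops); /* index of lowest set bit */
-
- ops &= ~(1UL << which);
- handlers[which]();
- } while (ops);
- }
-}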
-
-
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_single (int dest_cpu, int op)
-{
- set_bit(op, &per_cpu(ipi_operation, dest_cpu));
- ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_allbutself (int op)
-{
- unsigned int i;
-
- for_each_online_cpu(i) {
- if (i != smp_processor_id())
- send_IPI_single(i, op);
- }
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_mask(const struct cpumask *mask, int op)
-{
- unsigned int cpu;
-
- for_each_cpu(cpu, mask) {
- send_IPI_single(cpu, op);
- }
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_all (int op)
-{
- int i;
-
- for_each_online_cpu(i) {
- send_IPI_single(i, op);
- }
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_self (int op)
-{
- send_IPI_single(smp_processor_id(), op);
-}
-
-#ifdef CONFIG_KEXEC
-void
-kdump_smp_send_stop(void)
-{
- send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
-}
-
-void
-kdump_smp_send_init(void)
-{
- unsigned int cpu, self_cpu;
- self_cpu = smp_processor_id();
- for_each_online_cpu(cpu) {
- if (cpu != self_cpu) {
- if (kdump_status[cpu] == 0)
- ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
- }
- }
-}
-#endif
-/*
- * Called with preemption disabled.
- */
-void
-arch_smp_send_reschedule (int cpu)
-{
- ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
-}
-EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
-
-/*
- * Called with preemption disabled.
- */
-static void
-smp_send_local_flush_tlb (int cpu)
-{
- ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
-}
-
-void
-smp_local_flush_tlb(void)
-{
- /*
- * Use atomic ops. Otherwise, the load/increment/store sequence from
- * a "++" operation can have the line stolen between the load & store.
- * The overhead of the atomic op is negligible in this case and offers
- * significant benefit for the brief periods where lots of cpus
- * are simultaneously flushing TLBs.
- */
- ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
- local_flush_tlb_all();
-}
-
-#define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */
-
-void
-smp_flush_tlb_cpumask(cpumask_t xcpumask)
-{
- unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
- cpumask_t cpumask = xcpumask;
- int mycpu, cpu, flush_mycpu = 0;
-
- preempt_disable();
- mycpu = smp_processor_id();
-
- for_each_cpu(cpu, &cpumask)
- counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
-
- mb();
- for_each_cpu(cpu, &cpumask) {
- if (cpu == mycpu)
- flush_mycpu = 1;
- else
- smp_send_local_flush_tlb(cpu);
- }
-
- if (flush_mycpu)
- smp_local_flush_tlb();
-
- for_each_cpu(cpu, &cpumask)
- while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
- udelay(FLUSH_DELAY);
-
- preempt_enable();
-}
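-
-/*
- * The completion protocol above, spelled out: the initiator snapshots the
- * low 16 bits of every target's flush counter, kicks the targets, and then
- * spins until each counter moves past its snapshot; the increment in
- * smp_local_flush_tlb() on the target is what releases the wait.
- */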
-
-void
-smp_flush_tlb_all (void)
-{
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
-}
-
-void
-smp_flush_tlb_mm (struct mm_struct *mm)
-{
- cpumask_var_t cpus;
- preempt_disable();
- /* this happens for the common case of a single-threaded fork(): */
- if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
- {
- local_finish_flush_tlb_mm(mm);
- preempt_enable();
- return;
- }
- if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
- smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
- mm, 1);
- } else {
- cpumask_copy(cpus, mm_cpumask(mm));
- smp_call_function_many(cpus,
- (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
- free_cpumask_var(cpus);
- }
- local_irq_disable();
- local_finish_flush_tlb_mm(mm);
- local_irq_enable();
- preempt_enable();
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
-}
-
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- send_IPI_mask(mask, IPI_CALL_FUNC);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-void
-smp_send_stop (void)
-{
- send_IPI_allbutself(IPI_CPU_STOP);
-}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
deleted file mode 100644
index d0e935cf20..0000000000
--- a/arch/ia64/kernel/smpboot.c
+++ /dev/null
@@ -1,839 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SMP boot-related support
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2001, 2004-2005 Intel Corp
- * Rohit Seth <rohit.seth@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Gordon Jin <gordon.jin@intel.com>
- * Ashok Raj <ashok.raj@intel.com>
- *
- * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
- * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
- * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
- * smp_boot_cpus()/smp_commence() is replaced by
- * smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
- * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
- * 04/12/26 Jin Gordon <gordon.jin@intel.com>
- * 04/12/26 Rohit Seth <rohit.seth@intel.com>
- * Add multi-threading and multi-core detection
- * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
- * Setup cpu_sibling_map and cpu_core_map
- */
-
-#include <linux/module.h>
-#include <linux/acpi.h>
-#include <linux/memblock.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/notifier.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/efi.h>
-#include <linux/percpu.h>
-#include <linux/bitops.h>
-
-#include <linux/atomic.h>
-#include <asm/cache.h>
-#include <asm/current.h>
-#include <asm/delay.h>
-#include <asm/efi.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mca.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/tlbflush.h>
-#include <asm/unistd.h>
-
-#define SMP_DEBUG 0
-
-#if SMP_DEBUG
-#define Dprintk(x...) printk(x)
-#else
-#define Dprintk(x...)
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PERMIT_BSP_REMOVE
-#define bsp_remove_ok 1
-#else
-#define bsp_remove_ok 0
-#endif
-
-/*
- * Global array allocated for NR_CPUS at boot time
- */
-struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
-
-/*
- * start_ap in head.S uses this to store the currently booting
- * CPU's info.
- */
-struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
-
-#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
-
-#else
-#define set_brendez_area(x)
-#endif
-
-
-/*
- * ITC synchronization related stuff:
- */
-#define MASTER (0)
-#define SLAVE (SMP_CACHE_BYTES/8)
-
-#define NUM_ROUNDS 64 /* magic value */
-#define NUM_ITERS 5 /* likewise */
-
-static DEFINE_SPINLOCK(itc_sync_lock);
-static volatile unsigned long go[SLAVE + 1];
-
-#define DEBUG_ITC_SYNC 0
-
-extern void start_ap (void);
-extern unsigned long ia64_iobase;
-
-struct task_struct *task_for_booting_cpu;
-
-/*
- * State for each CPU
- */
-DEFINE_PER_CPU(int, cpu_state);
-
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
-DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
-EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
-
-int smp_num_siblings = 1;
-
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-volatile int ia64_cpu_to_sapicid[NR_CPUS];
-EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-
-static cpumask_t cpu_callin_map;
-
-struct smp_boot_data smp_boot_data __initdata;
-
-unsigned long ap_wakeup_vector = -1; /* External interrupt vector used to wake up APs */
-
-char __initdata no_int_routing;
-
-unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
-
-#ifdef CONFIG_FORCE_CPEI_RETARGET
-#define CPEI_OVERRIDE_DEFAULT (1)
-#else
-#define CPEI_OVERRIDE_DEFAULT (0)
-#endif
-
-unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
-
-static int __init
-cmdl_force_cpei(char *str)
-{
- int value=0;
-
- get_option (&str, &value);
- force_cpei_retarget = value;
-
- return 1;
-}
-
-__setup("force_cpei=", cmdl_force_cpei);
-
-static int __init
-nointroute (char *str)
-{
- no_int_routing = 1;
- printk ("no_int_routing on\n");
- return 1;
-}
-
-__setup("nointroute", nointroute);
-
-static void fix_b0_for_bsp(void)
-{
-#ifdef CONFIG_HOTPLUG_CPU
- int cpuid;
- static int fix_bsp_b0 = 1;
-
- cpuid = smp_processor_id();
-
- /*
- * Cache the b0 value on the first AP that comes up
- */
- if (!(fix_bsp_b0 && cpuid))
- return;
-
- sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
- printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
-
- fix_bsp_b0 = 0;
-#endif
-}
-
-void
-sync_master (void *arg)
-{
- unsigned long flags, i;
-
- go[MASTER] = 0;
-
- local_irq_save(flags);
- {
- for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
- while (!go[MASTER])
- cpu_relax();
- go[MASTER] = 0;
- go[SLAVE] = ia64_get_itc();
- }
- }
- local_irq_restore(flags);
-}
-
-/*
- * Return the number of cycles by which our itc differs from the itc on the master
- * (time-keeper) CPU. A positive number indicates our itc is ahead of the master,
- * negative that it is behind.
- */
-static inline long
-get_delta (long *rt, long *master)
-{
- unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
- unsigned long tcenter, t0, t1, tm;
- long i;
-
- for (i = 0; i < NUM_ITERS; ++i) {
- t0 = ia64_get_itc();
- go[MASTER] = 1;
- while (!(tm = go[SLAVE]))
- cpu_relax();
- go[SLAVE] = 0;
- t1 = ia64_get_itc();
-
- if (t1 - t0 < best_t1 - best_t0)
- best_t0 = t0, best_t1 = t1, best_tm = tm;
- }
-
- *rt = best_t1 - best_t0;
- *master = best_tm - best_t0;
-
- /* average best_t0 and best_t1 without overflow: */
- tcenter = (best_t0/2 + best_t1/2);
- if (best_t0 % 2 + best_t1 % 2 == 2)
- ++tcenter;
- return tcenter - best_tm;
-}
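-
-/*
- * Worked example for the midpoint computation above (illustrative
- * numbers): with best_t0 = 1001 and best_t1 = 1004, a naive
- * (t0 + t1)/2 could overflow for large ITC values, so each term is
- * halved first:
- *
- *	tcenter = 1001/2 + 1004/2 = 500 + 502 = 1002
- *	1001 % 2 + 1004 % 2 == 1, so no +1 correction is applied
- *
- * With best_tm = 1000, get_delta() returns 1002 - 1000 = 2, i.e. the
- * local ITC reads about 2 cycles ahead of the master's.
- */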
-
-/*
- * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
- * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of
- * unaccounted-for errors (such as getting a machine check in the middle of a calibration
- * step). The basic idea is for the slave to ask the master what itc value it has and to
- * read its own itc before and after the master responds. Each iteration gives us three
- * timestamps:
- *
- * slave master
- *
- * t0 ---\
- * ---\
- * --->
- * tm
- * /---
- * /---
- * t1 <---
- *
- *
- * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
- * and t1. If we achieve this, the clocks are synchronized provided the interconnect
- * between the slave and the master is symmetric. Even if the interconnect were
- * asymmetric, we would still know that the synchronization error is smaller than the
- * roundtrip latency (t1 - t0).
- *
- * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
- * within one or two cycles. However, we can only *guarantee* that the synchronization is
- * accurate to within a round-trip time, which is typically in the range of several
- * hundred cycles (e.g., ~500 cycles). In practice, this means that the ITCs are usually
- * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
- * than half a microsecond or so.
- */
-void
-ia64_sync_itc (unsigned int master)
-{
- long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
-#if DEBUG_ITC_SYNC
- struct {
- long rt; /* roundtrip time */
- long master; /* master's timestamp */
- long diff; /* difference between midpoint and master's timestamp */
- long lat; /* estimate of itc adjustment latency */
- } t[NUM_ROUNDS];
-#endif
-
- /*
- * Make sure local timer ticks are disabled while we sync. If
- * they were enabled, we'd have to worry about nasty issues
- * like setting the ITC ahead of (or a long time before) the
- * next scheduled tick.
- */
- BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
-
- go[MASTER] = 1;
-
- if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
- printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
- return;
- }
-
- while (go[MASTER])
- cpu_relax(); /* wait for master to be ready */
-
- spin_lock_irqsave(&itc_sync_lock, flags);
- {
- for (i = 0; i < NUM_ROUNDS; ++i) {
- delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
- done = 1; /* let's lock on to this... */
- bound = rt;
- }
-
- if (!done) {
- if (i > 0) {
- adjust_latency += -delta;
- adj = -delta + adjust_latency/4;
- } else
- adj = -delta;
-
- ia64_set_itc(ia64_get_itc() + adj);
- }
-#if DEBUG_ITC_SYNC
- t[i].rt = rt;
- t[i].master = master_time_stamp;
- t[i].diff = delta;
- t[i].lat = adjust_latency/4;
-#endif
- }
- }
- spin_unlock_irqrestore(&itc_sync_lock, flags);
-
-#if DEBUG_ITC_SYNC
- for (i = 0; i < NUM_ROUNDS; ++i)
- printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
- t[i].rt, t[i].master, t[i].diff, t[i].lat);
-#endif
-
- printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
- "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
-}
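-
-/*
- * Worked example of one synchronization round (illustrative numbers):
- * suppose get_delta() measures t0 = 100, tm = 140, t1 = 160 (in ITC
- * cycles).  The midpoint is (100 + 160)/2 = 130, so delta =
- * 130 - 140 = -10: the slave's ITC lags the master's estimate by 10
- * cycles.  The loop above then applies adj = -delta = +10, moving tm
- * toward the midpoint on the next round, and the error is bounded by
- * the roundtrip time t1 - t0 = 60 cycles for that round.
- */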
-
-/*
- * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
- */
-static inline void smp_setup_percpu_timer(void)
-{
-}
-
-static void
-smp_callin (void)
-{
- int cpuid, phys_id, itc_master;
- struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
- extern void ia64_init_itm(void);
- extern volatile int time_keeper_id;
-
- cpuid = smp_processor_id();
- phys_id = hard_smp_processor_id();
- itc_master = time_keeper_id;
-
- if (cpu_online(cpuid)) {
- printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
- phys_id, cpuid);
- BUG();
- }
-
- fix_b0_for_bsp();
-
- /*
- * numa_node_id() works after this.
- */
- set_numa_node(cpu_to_node_map[cpuid]);
- set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
-
- spin_lock(&vector_lock);
- /* Setup the per cpu irq handling data structures */
- __setup_vector_irq(cpuid);
- notify_cpu_starting(cpuid);
- set_cpu_online(cpuid, true);
- per_cpu(cpu_state, cpuid) = CPU_ONLINE;
- spin_unlock(&vector_lock);
-
- smp_setup_percpu_timer();
-
- ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
-
- local_irq_enable();
-
- if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
- /*
- * Synchronize the ITC with the BP. Need to do this after irqs are
- * enabled because ia64_sync_itc() calls smp_call_function_single(), which
-		 * calls spin_unlock_bh(), which calls
- * local_bh_enable(), which bugs out if irqs are not enabled...
- */
- Dprintk("Going to syncup ITC with ITC Master.\n");
- ia64_sync_itc(itc_master);
- }
-
- /*
- * Get our bogomips.
- */
- ia64_init_itm();
-
- /*
- * Delay calibration can be skipped if new processor is identical to the
- * previous processor.
- */
- last_cpuinfo = cpu_data(cpuid - 1);
- this_cpuinfo = local_cpu_data;
- if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
- last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
- last_cpuinfo->features != this_cpuinfo->features ||
- last_cpuinfo->revision != this_cpuinfo->revision ||
- last_cpuinfo->family != this_cpuinfo->family ||
- last_cpuinfo->archrev != this_cpuinfo->archrev ||
- last_cpuinfo->model != this_cpuinfo->model)
- calibrate_delay();
- local_cpu_data->loops_per_jiffy = loops_per_jiffy;
-
- /*
- * Allow the master to continue.
- */
- cpumask_set_cpu(cpuid, &cpu_callin_map);
- Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
-}
-
-
-/*
- * Activate a secondary processor. head.S calls this.
- */
-int
-start_secondary (void *unused)
-{
- /* Early console may use I/O ports */
- ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-#ifndef CONFIG_PRINTK_TIME
- Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
-#endif
- efi_map_pal_code();
- cpu_init();
- smp_callin();
-
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
- return 0;
-}
-
-static int
-do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
-{
- int timeout;
-
- task_for_booting_cpu = idle;
- Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
-
- set_brendez_area(cpu);
- ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
-
- /*
- * Wait 10s total for the AP to start
- */
- Dprintk("Waiting on callin_map ...");
- for (timeout = 0; timeout < 100000; timeout++) {
- if (cpumask_test_cpu(cpu, &cpu_callin_map))
- break; /* It has booted */
- barrier(); /* Make sure we re-read cpu_callin_map */
- udelay(100);
- }
- Dprintk("\n");
-
- if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
- printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
- ia64_cpu_to_sapicid[cpu] = -1;
- set_cpu_online(cpu, false); /* was set in smp_callin() */
- return -EINVAL;
- }
- return 0;
-}
-
-static int __init
-decay (char *str)
-{
- int ticks;
- get_option (&str, &ticks);
- return 1;
-}
-
-__setup("decay=", decay);
-
-/*
- * Initialize the logical CPU number to SAPICID mapping
- */
-void __init
-smp_build_cpu_map (void)
-{
- int sapicid, cpu, i;
- int boot_cpu_id = hard_smp_processor_id();
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- ia64_cpu_to_sapicid[cpu] = -1;
- }
-
- ia64_cpu_to_sapicid[0] = boot_cpu_id;
- init_cpu_present(cpumask_of(0));
- set_cpu_possible(0, true);
- for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
- sapicid = smp_boot_data.cpu_phys_id[i];
- if (sapicid == boot_cpu_id)
- continue;
- set_cpu_present(cpu, true);
- set_cpu_possible(cpu, true);
- ia64_cpu_to_sapicid[cpu] = sapicid;
- cpu++;
- }
-}
-
-/*
- * Cycle through the APs sending Wakeup IPIs to boot each.
- */
-void __init
-smp_prepare_cpus (unsigned int max_cpus)
-{
- int boot_cpu_id = hard_smp_processor_id();
-
- /*
- * Initialize the per-CPU profiling counter/multiplier
- */
-
- smp_setup_percpu_timer();
-
- cpumask_set_cpu(0, &cpu_callin_map);
-
- local_cpu_data->loops_per_jiffy = loops_per_jiffy;
- ia64_cpu_to_sapicid[0] = boot_cpu_id;
-
- printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
-
- current_thread_info()->cpu = 0;
-
- /*
- * If SMP should be disabled, then really disable it!
- */
- if (!max_cpus) {
- printk(KERN_INFO "SMP mode deactivated.\n");
- init_cpu_online(cpumask_of(0));
- init_cpu_present(cpumask_of(0));
- init_cpu_possible(cpumask_of(0));
- return;
- }
-}
-
-void smp_prepare_boot_cpu(void)
-{
- set_cpu_online(smp_processor_id(), true);
- cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
- set_numa_node(cpu_to_node_map[smp_processor_id()]);
- per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void
-clear_cpu_sibling_map(int cpu)
-{
- int i;
-
- for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
- cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
- for_each_cpu(i, &cpu_core_map[cpu])
- cpumask_clear_cpu(cpu, &cpu_core_map[i]);
-
- per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
-}
-
-static void
-remove_siblinginfo(int cpu)
-{
- if (cpu_data(cpu)->threads_per_core == 1 &&
- cpu_data(cpu)->cores_per_socket == 1) {
- cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
- cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
- return;
- }
-
-	/* remove it from all sibling maps */
- clear_cpu_sibling_map(cpu);
-}
-
-extern void fixup_irqs(void);
-
-int migrate_platform_irqs(unsigned int cpu)
-{
- int new_cpei_cpu;
- struct irq_data *data = NULL;
- const struct cpumask *mask;
- int retval = 0;
-
- /*
-	 * Don't permit the CPEI target to be removed.
- */
- if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
- printk ("CPU (%d) is CPEI Target\n", cpu);
- if (can_cpei_retarget()) {
- /*
- * Now re-target the CPEI to a different processor
- */
- new_cpei_cpu = cpumask_any(cpu_online_mask);
- mask = cpumask_of(new_cpei_cpu);
- set_cpei_target_cpu(new_cpei_cpu);
- data = irq_get_irq_data(ia64_cpe_irq);
- /*
-			 * Switch the affinity immediately for now. Ideally we
-			 * would fake the interrupt the way other interrupts are
-			 * handled, but we need to study CPEI behaviour with
-			 * polling before making that change.
- */
- if (data && data->chip) {
- data->chip->irq_disable(data);
- data->chip->irq_set_affinity(data, mask, false);
- data->chip->irq_enable(data);
- printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
- }
- }
- if (!data) {
- printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
- retval = -EBUSY;
- }
- }
- return retval;
-}
-
-/* must be called with cpucontrol mutex held */
-int __cpu_disable(void)
-{
- int cpu = smp_processor_id();
-
- /*
-	 * Don't permit removal of the boot processor for now.
- */
- if (cpu == 0 && !bsp_remove_ok) {
- printk ("Your platform does not support removal of BSP\n");
- return (-EBUSY);
- }
-
- set_cpu_online(cpu, false);
-
- if (migrate_platform_irqs(cpu)) {
- set_cpu_online(cpu, true);
- return -EBUSY;
- }
-
- remove_siblinginfo(cpu);
- fixup_irqs();
- local_flush_tlb_all();
- cpumask_clear_cpu(cpu, &cpu_callin_map);
- return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
- unsigned int i;
-
- for (i = 0; i < 100; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD)
- {
- printk ("CPU %d is now offline\n", cpu);
- return;
- }
- msleep(100);
- }
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void
-smp_cpus_done (unsigned int dummy)
-{
- int cpu;
- unsigned long bogosum = 0;
-
- /*
- * Allow the user to impress friends.
- */
-
- for_each_online_cpu(cpu) {
- bogosum += cpu_data(cpu)->loops_per_jiffy;
- }
-
- printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
-}
-
-static inline void set_cpu_sibling_map(int cpu)
-{
- int i;
-
- for_each_online_cpu(i) {
- if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
- cpumask_set_cpu(i, &cpu_core_map[cpu]);
- cpumask_set_cpu(cpu, &cpu_core_map[i]);
- if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
- cpumask_set_cpu(i,
- &per_cpu(cpu_sibling_map, cpu));
- cpumask_set_cpu(cpu,
- &per_cpu(cpu_sibling_map, i));
- }
- }
- }
-}
-
-int
-__cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
- int ret;
- int sapicid;
-
- sapicid = ia64_cpu_to_sapicid[cpu];
- if (sapicid == -1)
- return -EINVAL;
-
- /*
-	 * Already booted CPU? Not valid anymore, since we no longer
-	 * tight-spin in the idle loop.
- */
- if (cpumask_test_cpu(cpu, &cpu_callin_map))
- return -EINVAL;
-
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- /* Processor goes to start_secondary(), sets online flag */
- ret = do_boot_cpu(sapicid, cpu, tidle);
- if (ret < 0)
- return ret;
-
- if (cpu_data(cpu)->threads_per_core == 1 &&
- cpu_data(cpu)->cores_per_socket == 1) {
- cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
- cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
- return 0;
- }
-
- set_cpu_sibling_map(cpu);
-
- return 0;
-}
-
-/*
- * Assume that CPUs have been discovered by some platform-dependent interface. For
- * SoftSDV/Lion, that would be ACPI.
- *
- * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
- */
-void __init
-init_smp_config(void)
-{
- struct fptr {
- unsigned long fp;
- unsigned long gp;
- } *ap_startup;
- long sal_ret;
-
- /* Tell SAL where to drop the APs. */
- ap_startup = (struct fptr *) start_ap;
- sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
- if (sal_ret < 0)
- printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
- ia64_sal_strerror(sal_ret));
-}
-
-/*
- * identify_siblings(cpu) gets called from identify_cpu. This populates the
- * information related to logical execution units in per_cpu_data structure.
- */
-void identify_siblings(struct cpuinfo_ia64 *c)
-{
- long status;
- u16 pltid;
- pal_logical_to_physical_t info;
-
- status = ia64_pal_logical_to_phys(-1, &info);
- if (status != PAL_STATUS_SUCCESS) {
- if (status != PAL_STATUS_UNIMPLEMENTED) {
- printk(KERN_ERR
- "ia64_pal_logical_to_phys failed with %ld\n",
- status);
- return;
- }
-
- info.overview_ppid = 0;
- info.overview_cpp = 1;
- info.overview_tpc = 1;
- }
-
- status = ia64_sal_physical_id_info(&pltid);
- if (status != PAL_STATUS_SUCCESS) {
- if (status != PAL_STATUS_UNIMPLEMENTED)
- printk(KERN_ERR
- "ia64_sal_pltid failed with %ld\n",
- status);
- return;
- }
-
- c->socket_id = (pltid << 8) | info.overview_ppid;
-
- if (info.overview_cpp == 1 && info.overview_tpc == 1)
- return;
-
- c->cores_per_socket = info.overview_cpp;
- c->threads_per_core = info.overview_tpc;
- c->num_log = info.overview_num_log;
-
- c->core_id = info.log1_cid;
- c->thread_id = info.log1_tid;
-}
-
-/*
- * Returns nonzero if multi-threading is enabled on at least one
- * physical package. Due to CPU hotplug and (maxcpus=), not all
- * threads are necessarily enabled even though the processor
- * supports multi-threading.
- */
-int is_multithreading_enabled(void)
-{
- int i, j;
-
- for_each_present_cpu(i) {
- for_each_present_cpu(j) {
- if (j == i)
- continue;
- if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
- if (cpu_data(j)->core_id == cpu_data(i)->core_id)
- return 1;
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(is_multithreading_enabled);
diff --git a/arch/ia64/kernel/stacktrace.c b/arch/ia64/kernel/stacktrace.c
deleted file mode 100644
index 6e583a6bd2..0000000000
--- a/arch/ia64/kernel/stacktrace.c
+++ /dev/null
@@ -1,40 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/ia64/kernel/stacktrace.c
- *
- * Stack trace management functions
- *
- */
-#include <linux/sched.h>
-#include <linux/stacktrace.h>
-#include <linux/module.h>
-
-static void
-ia64_do_save_stack(struct unw_frame_info *info, void *arg)
-{
- struct stack_trace *trace = arg;
- unsigned long ip;
- int skip = trace->skip;
-
- trace->nr_entries = 0;
- do {
- unw_get_ip(info, &ip);
- if (ip == 0)
- break;
- if (skip == 0) {
- trace->entries[trace->nr_entries++] = ip;
- if (trace->nr_entries == trace->max_entries)
- break;
- } else
- skip--;
- } while (unw_unwind(info) >= 0);
-}
-
-/*
- * Save stack-backtrace addresses into a stack_trace buffer.
- */
-void save_stack_trace(struct stack_trace *trace)
-{
- unw_init_running(ia64_do_save_stack, trace);
-}
-EXPORT_SYMBOL(save_stack_trace);
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
deleted file mode 100644
index eb561cc936..0000000000
--- a/arch/ia64/kernel/sys_ia64.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file contains various system calls that have different calling
- * conventions on different platforms.
- *
- * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task_stack.h>
-#include <linux/shm.h>
-#include <linux/file.h> /* doh, must come after sched.h... */
-#include <linux/smp.h>
-#include <linux/syscalls.h>
-#include <linux/highuid.h>
-#include <linux/hugetlb.h>
-
-#include <asm/shmparam.h>
-#include <linux/uaccess.h>
-
-unsigned long
-arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags)
-{
- long map_shared = (flags & MAP_SHARED);
- unsigned long align_mask = 0;
- struct mm_struct *mm = current->mm;
- struct vm_unmapped_area_info info;
-
- if (len > RGN_MAP_LIMIT)
- return -ENOMEM;
-
- /* handle fixed mapping: prevent overlap with huge pages */
- if (flags & MAP_FIXED) {
- if (is_hugepage_only_range(mm, addr, len))
- return -EINVAL;
- return addr;
- }
-
-#ifdef CONFIG_HUGETLB_PAGE
- if (REGION_NUMBER(addr) == RGN_HPAGE)
- addr = 0;
-#endif
- if (!addr)
- addr = TASK_UNMAPPED_BASE;
-
- if (map_shared && (TASK_SIZE > 0xfffffffful))
- /*
- * For 64-bit tasks, align shared segments to 1MB to avoid potential
- * performance penalty due to virtual aliasing (see ASDM). For 32-bit
- * tasks, we prefer to avoid exhausting the address space too quickly by
- * limiting alignment to a single page.
- */
- align_mask = PAGE_MASK & (SHMLBA - 1);
-
- info.flags = 0;
- info.length = len;
- info.low_limit = addr;
- info.high_limit = TASK_SIZE;
- info.align_mask = align_mask;
- info.align_offset = pgoff << PAGE_SHIFT;
- return vm_unmapped_area(&info);
-}
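-
-/*
- * Illustrative note (assuming SHMLBA is 1MB here, per the comment
- * above): align_mask = PAGE_MASK & (SHMLBA - 1) keeps only the address
- * bits between the page size and 1MB, so vm_unmapped_area() hands back
- * a 1MB-aligned address for shared 64-bit mappings while private
- * mappings stay merely page-aligned.
- */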
-
-asmlinkage long
-ia64_getpriority (int which, int who)
-{
- long prio;
-
- prio = sys_getpriority(which, who);
- if (prio >= 0) {
- force_successful_syscall_return();
- prio = 20 - prio;
- }
- return prio;
-}
-
-/* XXX obsolete, but leave it here until the old libc is gone... */
-asmlinkage unsigned long
-sys_getpagesize (void)
-{
- return PAGE_SIZE;
-}
-
-asmlinkage unsigned long
-ia64_brk (unsigned long brk)
-{
- unsigned long retval = sys_brk(brk);
- force_successful_syscall_return();
- return retval;
-}
-
-/*
- * On IA-64, we return the two file descriptors in ret0 and ret1 (r8
- * and r9) as this is faster than doing a copy_to_user().
- */
-asmlinkage long
-sys_ia64_pipe (void)
-{
- struct pt_regs *regs = task_pt_regs(current);
- int fd[2];
- int retval;
-
- retval = do_pipe_flags(fd, 0);
- if (retval)
- goto out;
- retval = fd[0];
- regs->r9 = fd[1];
- out:
- return retval;
-}
-
-int ia64_mmap_check(unsigned long addr, unsigned long len,
- unsigned long flags)
-{
- unsigned long roff;
-
- /*
- * Don't permit mappings into unmapped space, the virtual page table
- * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
- * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
- */
- roff = REGION_OFFSET(addr);
- if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
- return -EINVAL;
- return 0;
-}
-
-/*
- * mmap2() is like mmap() except that the offset is expressed in units
- * of PAGE_SIZE (instead of bytes). This makes it possible to mmap2()
- * (pieces of) files that are larger than the address space of the CPU.
- */
-asmlinkage unsigned long
-sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
-{
- addr = ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
- if (!IS_ERR_VALUE(addr))
- force_successful_syscall_return();
- return addr;
-}
-
-asmlinkage unsigned long
-sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
-{
- if (offset_in_page(off) != 0)
- return -EINVAL;
-
- addr = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
- if (!IS_ERR_VALUE(addr))
- force_successful_syscall_return();
- return addr;
-}
-
-asmlinkage unsigned long
-ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
- unsigned long new_addr)
-{
- addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
- if (!IS_ERR_VALUE(addr))
- force_successful_syscall_return();
- return addr;
-}
-
-asmlinkage long
-ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
-{
- struct timespec64 rtn_tp;
- s64 tick_ns;
-
- /*
- * ia64's clock_gettime() syscall is implemented as a vdso call
- * fsys_clock_gettime(). Currently it handles only
- * CLOCK_REALTIME and CLOCK_MONOTONIC. Both are based on
- * 'ar.itc' counter which gets incremented at a constant
-	 * frequency. It's usually 400MHz, ~2.5x slower than the CPU
-	 * clock frequency, which is almost a 1ns hrtimer, but not quite.
- *
- * Let's special-case these timers to report correct precision
- * based on ITC frequency and not HZ frequency for supported
- * clocks.
- */
- switch (which_clock) {
- case CLOCK_REALTIME:
- case CLOCK_MONOTONIC:
- tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
- rtn_tp = ns_to_timespec64(tick_ns);
- return put_timespec64(&rtn_tp, tp);
- }
-
- return sys_clock_getres(which_clock, tp);
-}
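-
-/*
- * Worked example (illustrative frequency): for an ITC running at
- * 400MHz, tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, 400000000) = 3, so
- * clock_getres() reports a 3ns resolution for CLOCK_REALTIME and
- * CLOCK_MONOTONIC instead of the much coarser 1/HZ resolution the
- * generic sys_clock_getres() would report.
- */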
diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile
deleted file mode 100644
index d009f927a0..0000000000
--- a/arch/ia64/kernel/syscalls/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-kapi := arch/$(SRCARCH)/include/generated/asm
-uapi := arch/$(SRCARCH)/include/generated/uapi/asm
-
-$(shell mkdir -p $(uapi) $(kapi))
-
-syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/scripts/syscallhdr.sh
-systbl := $(srctree)/scripts/syscalltbl.sh
-
-quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --offset __NR_Linux $< $@
-
-quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
-
-$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
-uapisyshdr-y += unistd_64.h
-kapisyshdr-y += syscall_table.h
-
-uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
-kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
-targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
-
-PHONY += all
-all: $(uapisyshdr-y) $(kapisyshdr-y)
- @:
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
deleted file mode 100644
index 83d8609aec..0000000000
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ /dev/null
@@ -1,375 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
-#
-# Linux system call numbers and entry vectors for ia64
-#
-# The format is:
-# <number> <abi> <name> <entry point>
-#
-# Adding 1024 to <number> gives the actual system call number
-#
-# The <abi> is always "common" for this file
-#
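-# For example, entry "2 common read sys_read" below is invoked by
-# userspace as syscall number 1024 + 2 = 1026
-#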
-0 common ni_syscall sys_ni_syscall
-1 common exit sys_exit
-2 common read sys_read
-3 common write sys_write
-4 common open sys_open
-5 common close sys_close
-6 common creat sys_creat
-7 common link sys_link
-8 common unlink sys_unlink
-9 common execve ia64_execve
-10 common chdir sys_chdir
-11 common fchdir sys_fchdir
-12 common utimes sys_utimes
-13 common mknod sys_mknod
-14 common chmod sys_chmod
-15 common chown sys_chown
-16 common lseek sys_lseek
-17 common getpid sys_getpid
-18 common getppid sys_getppid
-19 common mount sys_mount
-20 common umount2 sys_umount
-21 common setuid sys_setuid
-22 common getuid sys_getuid
-23 common geteuid sys_geteuid
-24 common ptrace sys_ptrace
-25 common access sys_access
-26 common sync sys_sync
-27 common fsync sys_fsync
-28 common fdatasync sys_fdatasync
-29 common kill sys_kill
-30 common rename sys_rename
-31 common mkdir sys_mkdir
-32 common rmdir sys_rmdir
-33 common dup sys_dup
-34 common pipe sys_ia64_pipe
-35 common times sys_times
-36 common brk ia64_brk
-37 common setgid sys_setgid
-38 common getgid sys_getgid
-39 common getegid sys_getegid
-40 common acct sys_acct
-41 common ioctl sys_ioctl
-42 common fcntl sys_fcntl
-43 common umask sys_umask
-44 common chroot sys_chroot
-45 common ustat sys_ustat
-46 common dup2 sys_dup2
-47 common setreuid sys_setreuid
-48 common setregid sys_setregid
-49 common getresuid sys_getresuid
-50 common setresuid sys_setresuid
-51 common getresgid sys_getresgid
-52 common setresgid sys_setresgid
-53 common getgroups sys_getgroups
-54 common setgroups sys_setgroups
-55 common getpgid sys_getpgid
-56 common setpgid sys_setpgid
-57 common setsid sys_setsid
-58 common getsid sys_getsid
-59 common sethostname sys_sethostname
-60 common setrlimit sys_setrlimit
-61 common getrlimit sys_getrlimit
-62 common getrusage sys_getrusage
-63 common gettimeofday sys_gettimeofday
-64 common settimeofday sys_settimeofday
-65 common select sys_select
-66 common poll sys_poll
-67 common symlink sys_symlink
-68 common readlink sys_readlink
-69 common uselib sys_uselib
-70 common swapon sys_swapon
-71 common swapoff sys_swapoff
-72 common reboot sys_reboot
-73 common truncate sys_truncate
-74 common ftruncate sys_ftruncate
-75 common fchmod sys_fchmod
-76 common fchown sys_fchown
-77 common getpriority ia64_getpriority
-78 common setpriority sys_setpriority
-79 common statfs sys_statfs
-80 common fstatfs sys_fstatfs
-81 common gettid sys_gettid
-82 common semget sys_semget
-83 common semop sys_semop
-84 common semctl sys_semctl
-85 common msgget sys_msgget
-86 common msgsnd sys_msgsnd
-87 common msgrcv sys_msgrcv
-88 common msgctl sys_msgctl
-89 common shmget sys_shmget
-90 common shmat sys_shmat
-91 common shmdt sys_shmdt
-92 common shmctl sys_shmctl
-93 common syslog sys_syslog
-94 common setitimer sys_setitimer
-95 common getitimer sys_getitimer
-# 1120 was old_stat
-# 1121 was old_lstat
-# 1122 was old_fstat
-99 common vhangup sys_vhangup
-100 common lchown sys_lchown
-101 common remap_file_pages sys_remap_file_pages
-102 common wait4 sys_wait4
-103 common sysinfo sys_sysinfo
-104 common clone sys_clone
-105 common setdomainname sys_setdomainname
-106 common uname sys_newuname
-107 common adjtimex sys_adjtimex
-# 1132 was create_module
-109 common init_module sys_init_module
-110 common delete_module sys_delete_module
-# 1135 was get_kernel_syms
-# 1136 was query_module
-113 common quotactl sys_quotactl
-114 common bdflush sys_ni_syscall
-115 common sysfs sys_sysfs
-116 common personality sys_personality
-117 common afs_syscall sys_ni_syscall
-118 common setfsuid sys_setfsuid
-119 common setfsgid sys_setfsgid
-120 common getdents sys_getdents
-121 common flock sys_flock
-122 common readv sys_readv
-123 common writev sys_writev
-124 common pread64 sys_pread64
-125 common pwrite64 sys_pwrite64
-126 common _sysctl sys_ni_syscall
-127 common mmap sys_mmap
-128 common munmap sys_munmap
-129 common mlock sys_mlock
-130 common mlockall sys_mlockall
-131 common mprotect sys_mprotect
-132 common mremap ia64_mremap
-133 common msync sys_msync
-134 common munlock sys_munlock
-135 common munlockall sys_munlockall
-136 common sched_getparam sys_sched_getparam
-137 common sched_setparam sys_sched_setparam
-138 common sched_getscheduler sys_sched_getscheduler
-139 common sched_setscheduler sys_sched_setscheduler
-140 common sched_yield sys_sched_yield
-141 common sched_get_priority_max sys_sched_get_priority_max
-142 common sched_get_priority_min sys_sched_get_priority_min
-143 common sched_rr_get_interval sys_sched_rr_get_interval
-144 common nanosleep sys_nanosleep
-145 common nfsservctl sys_ni_syscall
-146 common prctl sys_prctl
-147 common old_getpagesize sys_getpagesize
-148 common mmap2 sys_mmap2
-149 common pciconfig_read sys_pciconfig_read
-150 common pciconfig_write sys_pciconfig_write
-151 common perfmonctl sys_ni_syscall
-152 common sigaltstack sys_sigaltstack
-153 common rt_sigaction sys_rt_sigaction
-154 common rt_sigpending sys_rt_sigpending
-155 common rt_sigprocmask sys_rt_sigprocmask
-156 common rt_sigqueueinfo sys_rt_sigqueueinfo
-157 common rt_sigreturn sys_rt_sigreturn
-158 common rt_sigsuspend sys_rt_sigsuspend
-159 common rt_sigtimedwait sys_rt_sigtimedwait
-160 common getcwd sys_getcwd
-161 common capget sys_capget
-162 common capset sys_capset
-163 common sendfile sys_sendfile64
-164 common getpmsg sys_ni_syscall
-165 common putpmsg sys_ni_syscall
-166 common socket sys_socket
-167 common bind sys_bind
-168 common connect sys_connect
-169 common listen sys_listen
-170 common accept sys_accept
-171 common getsockname sys_getsockname
-172 common getpeername sys_getpeername
-173 common socketpair sys_socketpair
-174 common send sys_send
-175 common sendto sys_sendto
-176 common recv sys_recv
-177 common recvfrom sys_recvfrom
-178 common shutdown sys_shutdown
-179 common setsockopt sys_setsockopt
-180 common getsockopt sys_getsockopt
-181 common sendmsg sys_sendmsg
-182 common recvmsg sys_recvmsg
-183 common pivot_root sys_pivot_root
-184 common mincore sys_mincore
-185 common madvise sys_madvise
-186 common stat sys_newstat
-187 common lstat sys_newlstat
-188 common fstat sys_newfstat
-189 common clone2 sys_clone2
-190 common getdents64 sys_getdents64
-191 common getunwind sys_getunwind
-192 common readahead sys_readahead
-193 common setxattr sys_setxattr
-194 common lsetxattr sys_lsetxattr
-195 common fsetxattr sys_fsetxattr
-196 common getxattr sys_getxattr
-197 common lgetxattr sys_lgetxattr
-198 common fgetxattr sys_fgetxattr
-199 common listxattr sys_listxattr
-200 common llistxattr sys_llistxattr
-201 common flistxattr sys_flistxattr
-202 common removexattr sys_removexattr
-203 common lremovexattr sys_lremovexattr
-204 common fremovexattr sys_fremovexattr
-205 common tkill sys_tkill
-206 common futex sys_futex
-207 common sched_setaffinity sys_sched_setaffinity
-208 common sched_getaffinity sys_sched_getaffinity
-209 common set_tid_address sys_set_tid_address
-210 common fadvise64 sys_fadvise64_64
-211 common tgkill sys_tgkill
-212 common exit_group sys_exit_group
-213 common lookup_dcookie sys_lookup_dcookie
-214 common io_setup sys_io_setup
-215 common io_destroy sys_io_destroy
-216 common io_getevents sys_io_getevents
-217 common io_submit sys_io_submit
-218 common io_cancel sys_io_cancel
-219 common epoll_create sys_epoll_create
-220 common epoll_ctl sys_epoll_ctl
-221 common epoll_wait sys_epoll_wait
-222 common restart_syscall sys_restart_syscall
-223 common semtimedop sys_semtimedop
-224 common timer_create sys_timer_create
-225 common timer_settime sys_timer_settime
-226 common timer_gettime sys_timer_gettime
-227 common timer_getoverrun sys_timer_getoverrun
-228 common timer_delete sys_timer_delete
-229 common clock_settime sys_clock_settime
-230 common clock_gettime sys_clock_gettime
-231 common clock_getres ia64_clock_getres
-232 common clock_nanosleep sys_clock_nanosleep
-233 common fstatfs64 sys_fstatfs64
-234 common statfs64 sys_statfs64
-235 common mbind sys_mbind
-236 common get_mempolicy sys_get_mempolicy
-237 common set_mempolicy sys_set_mempolicy
-238 common mq_open sys_mq_open
-239 common mq_unlink sys_mq_unlink
-240 common mq_timedsend sys_mq_timedsend
-241 common mq_timedreceive sys_mq_timedreceive
-242 common mq_notify sys_mq_notify
-243 common mq_getsetattr sys_mq_getsetattr
-244 common kexec_load sys_kexec_load
-245 common vserver sys_ni_syscall
-246 common waitid sys_waitid
-247 common add_key sys_add_key
-248 common request_key sys_request_key
-249 common keyctl sys_keyctl
-250 common ioprio_set sys_ioprio_set
-251 common ioprio_get sys_ioprio_get
-252 common move_pages sys_move_pages
-253 common inotify_init sys_inotify_init
-254 common inotify_add_watch sys_inotify_add_watch
-255 common inotify_rm_watch sys_inotify_rm_watch
-256 common migrate_pages sys_migrate_pages
-257 common openat sys_openat
-258 common mkdirat sys_mkdirat
-259 common mknodat sys_mknodat
-260 common fchownat sys_fchownat
-261 common futimesat sys_futimesat
-262 common newfstatat sys_newfstatat
-263 common unlinkat sys_unlinkat
-264 common renameat sys_renameat
-265 common linkat sys_linkat
-266 common symlinkat sys_symlinkat
-267 common readlinkat sys_readlinkat
-268 common fchmodat sys_fchmodat
-269 common faccessat sys_faccessat
-270 common pselect6 sys_pselect6
-271 common ppoll sys_ppoll
-272 common unshare sys_unshare
-273 common splice sys_splice
-274 common set_robust_list sys_set_robust_list
-275 common get_robust_list sys_get_robust_list
-276 common sync_file_range sys_sync_file_range
-277 common tee sys_tee
-278 common vmsplice sys_vmsplice
-279 common fallocate sys_fallocate
-280 common getcpu sys_getcpu
-281 common epoll_pwait sys_epoll_pwait
-282 common utimensat sys_utimensat
-283 common signalfd sys_signalfd
-284 common timerfd sys_ni_syscall
-285 common eventfd sys_eventfd
-286 common timerfd_create sys_timerfd_create
-287 common timerfd_settime sys_timerfd_settime
-288 common timerfd_gettime sys_timerfd_gettime
-289 common signalfd4 sys_signalfd4
-290 common eventfd2 sys_eventfd2
-291 common epoll_create1 sys_epoll_create1
-292 common dup3 sys_dup3
-293 common pipe2 sys_pipe2
-294 common inotify_init1 sys_inotify_init1
-295 common preadv sys_preadv
-296 common pwritev sys_pwritev
-297 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
-298 common recvmmsg sys_recvmmsg
-299 common fanotify_init sys_fanotify_init
-300 common fanotify_mark sys_fanotify_mark
-301 common prlimit64 sys_prlimit64
-302 common name_to_handle_at sys_name_to_handle_at
-303 common open_by_handle_at sys_open_by_handle_at
-304 common clock_adjtime sys_clock_adjtime
-305 common syncfs sys_syncfs
-306 common setns sys_setns
-307 common sendmmsg sys_sendmmsg
-308 common process_vm_readv sys_process_vm_readv
-309 common process_vm_writev sys_process_vm_writev
-310 common accept4 sys_accept4
-311 common finit_module sys_finit_module
-312 common sched_setattr sys_sched_setattr
-313 common sched_getattr sys_sched_getattr
-314 common renameat2 sys_renameat2
-315 common getrandom sys_getrandom
-316 common memfd_create sys_memfd_create
-317 common bpf sys_bpf
-318 common execveat sys_execveat
-319 common userfaultfd sys_userfaultfd
-320 common membarrier sys_membarrier
-321 common kcmp sys_kcmp
-322 common mlock2 sys_mlock2
-323 common copy_file_range sys_copy_file_range
-324 common preadv2 sys_preadv2
-325 common pwritev2 sys_pwritev2
-326 common statx sys_statx
-327 common io_pgetevents sys_io_pgetevents
-328 common perf_event_open sys_perf_event_open
-329 common seccomp sys_seccomp
-330 common pkey_mprotect sys_pkey_mprotect
-331 common pkey_alloc sys_pkey_alloc
-332 common pkey_free sys_pkey_free
-333 common rseq sys_rseq
-# 334 through 423 are reserved to sync up with other architectures
-424 common pidfd_send_signal sys_pidfd_send_signal
-425 common io_uring_setup sys_io_uring_setup
-426 common io_uring_enter sys_io_uring_enter
-427 common io_uring_register sys_io_uring_register
-428 common open_tree sys_open_tree
-429 common move_mount sys_move_mount
-430 common fsopen sys_fsopen
-431 common fsconfig sys_fsconfig
-432 common fsmount sys_fsmount
-433 common fspick sys_fspick
-434 common pidfd_open sys_pidfd_open
-# 435 reserved for clone3
-436 common close_range sys_close_range
-437 common openat2 sys_openat2
-438 common pidfd_getfd sys_pidfd_getfd
-439 common faccessat2 sys_faccessat2
-440 common process_madvise sys_process_madvise
-441 common epoll_pwait2 sys_epoll_pwait2
-442 common mount_setattr sys_mount_setattr
-443 common quotactl_fd sys_quotactl_fd
-444 common landlock_create_ruleset sys_landlock_create_ruleset
-445 common landlock_add_rule sys_landlock_add_rule
-446 common landlock_restrict_self sys_landlock_restrict_self
-# 447 reserved for memfd_secret
-448 common process_mrelease sys_process_mrelease
-449 common futex_waitv sys_futex_waitv
-450 common set_mempolicy_home_node sys_set_mempolicy_home_node
-451 common cachestat sys_cachestat
-452 common fchmodat2 sys_fchmodat2
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
deleted file mode 100644
index 83ef044b63..0000000000
--- a/arch/ia64/kernel/time.c
+++ /dev/null
@@ -1,463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/ia64/kernel/time.c
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- * Copyright (C) 1999-2000 VA Linux Systems
- * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
- */
-
-#include <linux/cpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/profile.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/interrupt.h>
-#include <linux/efi.h>
-#include <linux/timex.h>
-#include <linux/timekeeper_internal.h>
-#include <linux/platform_device.h>
-#include <linux/sched/cputime.h>
-
-#include <asm/cputime.h>
-#include <asm/delay.h>
-#include <asm/efi.h>
-#include <asm/hw_irq.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-
-#include "fsyscall_gtod_data.h"
-#include "irq.h"
-
-static u64 itc_get_cycles(struct clocksource *cs);
-
-struct fsyscall_gtod_data_t fsyscall_gtod_data;
-
-struct itc_jitter_data_t itc_jitter_data;
-
-volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-unsigned long last_cli_ip;
-EXPORT_SYMBOL(last_cli_ip);
-
-#endif
-
-static struct clocksource clocksource_itc = {
- .name = "itc",
- .rating = 350,
- .read = itc_get_cycles,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-static struct clocksource *itc_clocksource;
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-
-#include <linux/kernel_stat.h>
-
-extern u64 cycle_to_nsec(u64 cyc);
-
-void vtime_flush(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- u64 delta;
-
- if (ti->utime)
- account_user_time(tsk, cycle_to_nsec(ti->utime));
-
- if (ti->gtime)
- account_guest_time(tsk, cycle_to_nsec(ti->gtime));
-
- if (ti->idle_time)
- account_idle_time(cycle_to_nsec(ti->idle_time));
-
- if (ti->stime) {
- delta = cycle_to_nsec(ti->stime);
- account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
- }
-
- if (ti->hardirq_time) {
- delta = cycle_to_nsec(ti->hardirq_time);
- account_system_index_time(tsk, delta, CPUTIME_IRQ);
- }
-
- if (ti->softirq_time) {
- delta = cycle_to_nsec(ti->softirq_time);
- account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
- }
-
- ti->utime = 0;
- ti->gtime = 0;
- ti->idle_time = 0;
- ti->stime = 0;
- ti->hardirq_time = 0;
- ti->softirq_time = 0;
-}
-
-/*
- * Called from the context switch with interrupts disabled, to charge all
- * accumulated times to the current process, and to prepare accounting on
- * the next process.
- */
-void arch_vtime_task_switch(struct task_struct *prev)
-{
- struct thread_info *pi = task_thread_info(prev);
- struct thread_info *ni = task_thread_info(current);
-
- ni->ac_stamp = pi->ac_stamp;
- ni->ac_stime = ni->ac_utime = 0;
-}
-
-/*
- * Account time for a transition between system, hard irq or soft irq state.
- * Note that this function is called with interrupts disabled.
- */
-static __u64 vtime_delta(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- __u64 now, delta_stime;
-
- WARN_ON_ONCE(!irqs_disabled());
-
- now = ia64_get_itc();
- delta_stime = now - ti->ac_stamp;
- ti->ac_stamp = now;
-
- return delta_stime;
-}
-
-void vtime_account_kernel(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- __u64 stime = vtime_delta(tsk);
-
- if (tsk->flags & PF_VCPU)
- ti->gtime += stime;
- else
- ti->stime += stime;
-}
-EXPORT_SYMBOL_GPL(vtime_account_kernel);
-
-void vtime_account_idle(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
-
- ti->idle_time += vtime_delta(tsk);
-}
-
-void vtime_account_softirq(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
-
- ti->softirq_time += vtime_delta(tsk);
-}
-
-void vtime_account_hardirq(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
-
- ti->hardirq_time += vtime_delta(tsk);
-}
-
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-static irqreturn_t
-timer_interrupt (int irq, void *dev_id)
-{
- unsigned long new_itm;
-
- if (cpu_is_offline(smp_processor_id())) {
- return IRQ_HANDLED;
- }
-
- new_itm = local_cpu_data->itm_next;
-
- if (!time_after(ia64_get_itc(), new_itm))
- printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- ia64_get_itc(), new_itm);
-
- while (1) {
- new_itm += local_cpu_data->itm_delta;
-
- legacy_timer_tick(smp_processor_id() == time_keeper_id);
-
- local_cpu_data->itm_next = new_itm;
-
- if (time_after(new_itm, ia64_get_itc()))
- break;
-
- /*
- * Allow IPIs to interrupt the timer loop.
- */
- local_irq_enable();
- local_irq_disable();
- }
-
- do {
- /*
- * If we're too close to the next clock tick for
- * comfort, we increase the safety margin by
- * intentionally dropping the next tick(s). We do NOT
- * update itm.next because that would force us to call
- * xtime_update() which in turn would let our clock run
- * too fast (with the potentially devastating effect
-		 * of losing monotonicity of time).
- */
- while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
- new_itm += local_cpu_data->itm_delta;
- ia64_set_itm(new_itm);
- /* double check, in case we got hit by a (slow) PMI: */
- } while (time_after_eq(ia64_get_itc(), new_itm));
- return IRQ_HANDLED;
-}
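-
-/*
- * Illustrative example of the tick-skipping logic above (assumed
- * itm_delta = 1500000 cycles): if the ITC has already passed
- * new_itm - 750000 when we go to reprogram the match register, whole
- * ticks are dropped (new_itm += itm_delta) until the next match is at
- * least half a tick in the future, trading a few lost ticks for the
- * guarantee that the match register is never set in the past.
- */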
-
-/*
- * Encapsulate access to the itm structure for SMP.
- */
-void
-ia64_cpu_local_tick (void)
-{
- int cpu = smp_processor_id();
- unsigned long shift = 0, delta;
-
- /* arrange for the cycle counter to generate a timer interrupt: */
- ia64_set_itv(IA64_TIMER_VECTOR);
-
- delta = local_cpu_data->itm_delta;
- /*
- * Stagger the timer tick for each CPU so they don't occur all at (almost) the
- * same time:
- */
- if (cpu) {
- unsigned long hi = 1UL << ia64_fls(cpu);
- shift = (2*(cpu - hi) + 1) * delta/hi/2;
- }
- local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
- ia64_set_itm(local_cpu_data->itm_next);
-}
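-
-/*
- * Worked example of the stagger computation above (illustrative):
- * with hi = 1UL << ia64_fls(cpu), the shifts subdivide the tick
- * period delta in a binary fashion:
- *
- *	cpu 1: hi = 1, shift = delta/2
- *	cpu 2: hi = 2, shift = delta/4		cpu 3: shift = 3*delta/4
- *	cpu 4: hi = 4, shift = delta/8		cpu 5..7: 3/8, 5/8, 7/8
- *
- * so each CPU's first tick lands halfway between offsets already in
- * use and the per-CPU timer interrupts spread across the period.
- */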
-
-static int nojitter;
-
-static int __init nojitter_setup(char *str)
-{
- nojitter = 1;
- printk("Jitter checking for ITC timers disabled\n");
- return 1;
-}
-
-__setup("nojitter", nojitter_setup);
-
-
-void ia64_init_itm(void)
-{
- unsigned long platform_base_freq, itc_freq;
- struct pal_freq_ratio itc_ratio, proc_ratio;
- long status, platform_base_drift, itc_drift;
-
- /*
- * According to SAL v2.6, we need to use a SAL call to determine the platform base
- * frequency and then a PAL call to determine the frequency ratio between the ITC
- * and the base frequency.
- */
- status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &platform_base_freq, &platform_base_drift);
- if (status != 0) {
- printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
- } else {
- status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
- if (status != 0)
- printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
- }
- if (status != 0) {
- /* invent "random" values */
- printk(KERN_ERR
- "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
- platform_base_freq = 100000000;
- platform_base_drift = -1; /* no drift info */
- itc_ratio.num = 3;
- itc_ratio.den = 1;
- }
- if (platform_base_freq < 40000000) {
- printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
- platform_base_freq);
- platform_base_freq = 75000000;
- platform_base_drift = -1;
- }
- if (!proc_ratio.den)
- proc_ratio.den = 1; /* avoid division by zero */
- if (!itc_ratio.den)
- itc_ratio.den = 1; /* avoid division by zero */
-
- itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
-
- local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
- printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
- "ITC freq=%lu.%03luMHz", smp_processor_id(),
- platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
- itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
-
- if (platform_base_drift != -1) {
- itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
- printk("+/-%ldppm\n", itc_drift);
- } else {
- itc_drift = -1;
- printk("\n");
- }
-
- local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
- local_cpu_data->itc_freq = itc_freq;
- local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
- local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
- + itc_freq/2)/itc_freq;
-
- if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-#ifdef CONFIG_SMP
- /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
- * Jitter compensation requires a cmpxchg which may limit
- * the scalability of the syscalls for retrieving time.
- * The ITC synchronization is usually successful to within a few
- * ITC ticks but this is not a sure thing. If you need to improve
- * timer performance in SMP situations then boot the kernel with the
- * "nojitter" option. However, doing so may result in time fluctuating (maybe
- * even going backward) if the ITC offsets between the individual CPUs
- * are too large.
- */
- if (!nojitter)
- itc_jitter_data.itc_jitter = 1;
-#endif
- } else
- /*
- * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
- * ITC values may fluctuate significantly between processors.
- * Clock should not be used for hrtimers. Mark itc as only
- * useful for boot and testing.
- *
-		 * Note that jitter compensation is off! There is no point in
-		 * synchronizing ITCs since there may be large differentials
-		 * between them that change over time.
- *
- * The only way to fix this would be to repeatedly sync the
- * ITCs. Until that time we have to avoid ITC.
- */
- clocksource_itc.rating = 50;
-
-	/* avoid softlockup message when a cpu is unplugged and plugged again. */
- touch_softlockup_watchdog();
-
- /* Setup the CPU local timer tick */
- ia64_cpu_local_tick();
-
- if (!itc_clocksource) {
- clocksource_register_hz(&clocksource_itc,
- local_cpu_data->itc_freq);
- itc_clocksource = &clocksource_itc;
- }
-}
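-
-/*
- * Worked example of the frequency math above (illustrative values):
- * with a 200MHz platform base clock and an ITC ratio of 15/2 from PAL,
- *
- *	itc_freq     = 200000000 * 15 / 2              = 1500000000 (1.5GHz)
- *	itm_delta    = (1500000000 + HZ/2) / HZ        = 1500000 for HZ=1000
- *	cyc_per_usec = (1500000000 + 500000) / 1000000 = 1500
- *
- * i.e. one timer tick every 1.5M ITC cycles and 1500 cycles per usec.
- */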
-
-static u64 itc_get_cycles(struct clocksource *cs)
-{
- unsigned long lcycle, now, ret;
-
- if (!itc_jitter_data.itc_jitter)
- return get_cycles();
-
- lcycle = itc_jitter_data.itc_lastcycle;
- now = get_cycles();
- if (lcycle && time_after(lcycle, now))
- return lcycle;
-
- /*
- * Keep track of the last timer value returned.
-	 * In an SMP environment, you can lose the cmpxchg race; if so,
-	 * cmpxchg returns the new value that the winner of the race
-	 * stored. Use that new value instead.
- */
- ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
- if (unlikely(ret != lcycle))
- return ret;
-
- return now;
-}
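-
-/*
- * Note on the pattern above: cmpxchg() returns the value that was
- * actually in itc_lastcycle.  If it equals the 'lcycle' we loaded, we
- * won the race and 'now' was published; otherwise another CPU already
- * published a newer timestamp and we return that one, keeping the
- * clocksource monotonic without taking a lock.
- */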
-
-void read_persistent_clock64(struct timespec64 *ts)
-{
- efi_gettimeofday(ts);
-}
-
-void __init
-time_init (void)
-{
- register_percpu_irq(IA64_TIMER_VECTOR, timer_interrupt, IRQF_IRQPOLL,
- "timer");
- ia64_init_itm();
-}
-
-/*
- * Generic udelay assumes that if preemption is allowed and the thread
- * migrates to another CPU, the ITC values are synchronized across
- * all CPUs.
- */
-static void
-ia64_itc_udelay (unsigned long usecs)
-{
- unsigned long start = ia64_get_itc();
- unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
-
- while (time_before(ia64_get_itc(), end))
- cpu_relax();
-}
-
-void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
-
-void
-udelay (unsigned long usecs)
-{
- (*ia64_udelay)(usecs);
-}
-EXPORT_SYMBOL(udelay);
-
-/* IA64 doesn't cache the timezone */
-void update_vsyscall_tz(void)
-{
-}
-
-void update_vsyscall(struct timekeeper *tk)
-{
- write_seqcount_begin(&fsyscall_gtod_data.seq);
-
- /* copy vsyscall data */
- fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
- fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
- fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
- fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
- fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
-
- fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
- fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
-
- fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
- + tk->wall_to_monotonic.tv_sec;
- fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
- + ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr_mono.shift);
-
- /* normalize */
- while (fsyscall_gtod_data.monotonic_time.snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
- fsyscall_gtod_data.monotonic_time.snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
- fsyscall_gtod_data.monotonic_time.sec++;
- }
-
- write_seqcount_end(&fsyscall_gtod_data.seq);
-}
-
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
deleted file mode 100644
index 741863a187..0000000000
--- a/arch/ia64/kernel/topology.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This file contains NUMA specific variables and functions which are used on
- * NUMA machines with contiguous memory.
- * 2002/08/07 Erich Focht <efocht@ess.nec.de>
- * Populate cpu entries in sysfs for non-numa systems as well
- * Intel Corporation - Ashok Raj
- * 02/27/2006 Zhang, Yanmin
- * Populate cpu cache entries in sysfs for cpu cache info
- */
-
-#include <linux/cpu.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/node.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/nodemask.h>
-#include <linux/notifier.h>
-#include <linux/export.h>
-#include <asm/mmzone.h>
-#include <asm/numa.h>
-#include <asm/cpu.h>
-
-static struct ia64_cpu *sysfs_cpus;
-
-void arch_fix_phys_package_id(int num, u32 slot)
-{
-#ifdef CONFIG_SMP
- if (cpu_data(num)->socket_id == -1)
- cpu_data(num)->socket_id = slot;
-#endif
-}
-EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-int __ref arch_register_cpu(int num)
-{
- /*
- * If CPEI can be re-targeted or if this is not
- * CPEI target, then it is hotpluggable
- */
- if (can_cpei_retarget() || !is_cpu_cpei_target(num))
- sysfs_cpus[num].cpu.hotpluggable = 1;
- map_cpu_to_node(num, node_cpuid[num].nid);
- return register_cpu(&sysfs_cpus[num].cpu, num);
-}
-EXPORT_SYMBOL(arch_register_cpu);
-
-void __ref arch_unregister_cpu(int num)
-{
- unregister_cpu(&sysfs_cpus[num].cpu);
- unmap_cpu_from_node(num, cpu_to_node(num));
-}
-EXPORT_SYMBOL(arch_unregister_cpu);
-#else
-int __init arch_register_cpu(int num)
-{
- return register_cpu(&sysfs_cpus[num].cpu, num);
-}
-#endif /*CONFIG_HOTPLUG_CPU*/
-
-
-static int __init topology_init(void)
-{
- int i, err = 0;
-
- sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
- if (!sysfs_cpus)
- panic("kcalloc in topology_init failed - NR_CPUS too big?");
-
- for_each_present_cpu(i) {
- if ((err = arch_register_cpu(i)))
- goto out;
- }
-out:
- return err;
-}
-
-subsys_initcall(topology_init);
-
-
-/*
- * Export cpu cache information through sysfs
- */
-
-/*
- * String arrays used for pretty-printing the cache information
- */
-static const char *cache_types[] = {
- "", /* not used */
- "Instruction",
- "Data",
- "Unified" /* unified */
-};
-
-static const char *cache_mattrib[]={
- "WriteThrough",
- "WriteBack",
- "", /* reserved */
- "" /* reserved */
-};
-
-struct cache_info {
- pal_cache_config_info_t cci;
- cpumask_t shared_cpu_map;
- int level;
- int type;
- struct kobject kobj;
-};
-
-struct cpu_cache_info {
- struct cache_info *cache_leaves;
- int num_cache_leaves;
- struct kobject kobj;
-};
-
-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
-#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
-
-#ifdef CONFIG_SMP
-static void cache_shared_cpu_map_setup(unsigned int cpu,
- struct cache_info * this_leaf)
-{
- pal_cache_shared_info_t csi;
- int num_shared, i = 0;
- unsigned int j;
-
- if (cpu_data(cpu)->threads_per_core <= 1 &&
- cpu_data(cpu)->cores_per_socket <= 1) {
- cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
- return;
- }
-
- if (ia64_pal_cache_shared_info(this_leaf->level,
- this_leaf->type,
- 0,
- &csi) != PAL_STATUS_SUCCESS)
- return;
-
- num_shared = (int) csi.num_shared;
- do {
- for_each_possible_cpu(j)
- if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
- && cpu_data(j)->core_id == csi.log1_cid
- && cpu_data(j)->thread_id == csi.log1_tid)
- cpumask_set_cpu(j, &this_leaf->shared_cpu_map);
-
- i++;
- } while (i < num_shared &&
- ia64_pal_cache_shared_info(this_leaf->level,
- this_leaf->type,
- i,
- &csi) == PAL_STATUS_SUCCESS);
-}
-#else
-static void cache_shared_cpu_map_setup(unsigned int cpu,
- struct cache_info * this_leaf)
-{
- cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
- return;
-}
-#endif
-
-static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
- char *buf)
-{
- return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
-}
-
-static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
- char *buf)
-{
- return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
-}
-
-static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
-{
- return sprintf(buf,
- "%s\n",
- cache_mattrib[this_leaf->cci.pcci_cache_attr]);
-}
-
-static ssize_t show_size(struct cache_info *this_leaf, char *buf)
-{
- return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
-}
-
-static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
-{
- unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
- number_of_sets /= this_leaf->cci.pcci_assoc;
- number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
-
- return sprintf(buf, "%u\n", number_of_sets);
-}
-
-static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
-{
- cpumask_t shared_cpu_map;
-
- cpumask_and(&shared_cpu_map,
- &this_leaf->shared_cpu_map, cpu_online_mask);
- return scnprintf(buf, PAGE_SIZE, "%*pb\n",
- cpumask_pr_args(&shared_cpu_map));
-}
-
-static ssize_t show_type(struct cache_info *this_leaf, char *buf)
-{
- int type = this_leaf->type + this_leaf->cci.pcci_unified;
- return sprintf(buf, "%s\n", cache_types[type]);
-}
-
-static ssize_t show_level(struct cache_info *this_leaf, char *buf)
-{
- return sprintf(buf, "%u\n", this_leaf->level);
-}
-
-struct cache_attr {
- struct attribute attr;
- ssize_t (*show)(struct cache_info *, char *);
- ssize_t (*store)(struct cache_info *, const char *, size_t count);
-};
-
-#ifdef define_one_ro
- #undef define_one_ro
-#endif
-#define define_one_ro(_name) \
- static struct cache_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
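-
-/*
- * For example, define_one_ro(level) expands to:
- *
- *   static struct cache_attr level = __ATTR(level, 0444, show_level, NULL);
- */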
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(ways_of_associativity);
-define_one_ro(size);
-define_one_ro(number_of_sets);
-define_one_ro(shared_cpu_map);
-define_one_ro(attributes);
-
-static struct attribute * cache_default_attrs[] = {
- &type.attr,
- &level.attr,
- &coherency_line_size.attr,
- &ways_of_associativity.attr,
- &attributes.attr,
- &size.attr,
- &number_of_sets.attr,
- &shared_cpu_map.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(cache_default);
-
-#define to_object(k) container_of(k, struct cache_info, kobj)
-#define to_attr(a) container_of(a, struct cache_attr, attr)
-
-static ssize_t ia64_cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
-{
- struct cache_attr *fattr = to_attr(attr);
- struct cache_info *this_leaf = to_object(kobj);
- ssize_t ret;
-
- ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
- return ret;
-}
-
-static const struct sysfs_ops cache_sysfs_ops = {
- .show = ia64_cache_show
-};
-
-static struct kobj_type cache_ktype = {
- .sysfs_ops = &cache_sysfs_ops,
- .default_groups = cache_default_groups,
-};
-
-static struct kobj_type cache_ktype_percpu_entry = {
- .sysfs_ops = &cache_sysfs_ops,
-};
-
-static void cpu_cache_sysfs_exit(unsigned int cpu)
-{
- kfree(all_cpu_cache_info[cpu].cache_leaves);
- all_cpu_cache_info[cpu].cache_leaves = NULL;
- all_cpu_cache_info[cpu].num_cache_leaves = 0;
- memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
- return;
-}
-
-static int cpu_cache_sysfs_init(unsigned int cpu)
-{
- unsigned long i, levels, unique_caches;
- pal_cache_config_info_t cci;
- int j;
- long status;
- struct cache_info *this_cache;
- int num_cache_leaves = 0;
-
- if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
- printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
- return -1;
- }
-
- this_cache = kcalloc(unique_caches, sizeof(struct cache_info),
- GFP_KERNEL);
- if (this_cache == NULL)
- return -ENOMEM;
-
- for (i = 0; i < levels; i++) {
- for (j = 2; j > 0; j--) {
- if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
- PAL_STATUS_SUCCESS)
- continue;
-
- this_cache[num_cache_leaves].cci = cci;
- this_cache[num_cache_leaves].level = i + 1;
- this_cache[num_cache_leaves].type = j;
-
- cache_shared_cpu_map_setup(cpu,
- &this_cache[num_cache_leaves]);
- num_cache_leaves++;
- }
- }
-
- all_cpu_cache_info[cpu].cache_leaves = this_cache;
- all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
-
- memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
-
- return 0;
-}
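-
-/*
- * Each cache level thus contributes at most two leaves, one per PAL
- * cache type probed above (j = 2 and j = 1, i.e. the data and
- * instruction sides); unified caches are reported via pcci_unified,
- * which show_type() adds to the type index.
- */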
-
-/* Add cache interface for CPU device */
-static int cache_add_dev(unsigned int cpu)
-{
- struct device *sys_dev = get_cpu_device(cpu);
- unsigned long i, j;
- struct cache_info *this_object;
- int retval = 0;
-
- if (all_cpu_cache_info[cpu].kobj.parent)
- return 0;
-
-
- retval = cpu_cache_sysfs_init(cpu);
- if (unlikely(retval < 0))
- return retval;
-
- retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
- &cache_ktype_percpu_entry, &sys_dev->kobj,
- "%s", "cache");
- if (unlikely(retval < 0)) {
- cpu_cache_sysfs_exit(cpu);
- return retval;
- }
-
- for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
- this_object = LEAF_KOBJECT_PTR(cpu,i);
- retval = kobject_init_and_add(&(this_object->kobj),
- &cache_ktype,
- &all_cpu_cache_info[cpu].kobj,
- "index%1lu", i);
- if (unlikely(retval)) {
- for (j = 0; j < i; j++) {
- kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
- }
- kobject_put(&all_cpu_cache_info[cpu].kobj);
- cpu_cache_sysfs_exit(cpu);
- return retval;
- }
- kobject_uevent(&(this_object->kobj), KOBJ_ADD);
- }
- kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
- return retval;
-}
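-
-/*
- * The resulting sysfs layout matches other architectures:
- * /sys/devices/system/cpu/cpuN/cache/indexM/{level,type,size,...},
- * with one indexM directory per cache leaf registered above.
- */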
-
-/* Remove cache interface for CPU device */
-static int cache_remove_dev(unsigned int cpu)
-{
- unsigned long i;
-
- for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
- kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
-
- if (all_cpu_cache_info[cpu].kobj.parent) {
- kobject_put(&all_cpu_cache_info[cpu].kobj);
- memset(&all_cpu_cache_info[cpu].kobj,
- 0,
- sizeof(struct kobject));
- }
-
- cpu_cache_sysfs_exit(cpu);
-
- return 0;
-}
-
-static int __init cache_sysfs_init(void)
-{
- int ret;
-
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
- cache_add_dev, cache_remove_dev);
- WARN_ON(ret < 0);
- return 0;
-}
-device_initcall(cache_sysfs_init);
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
deleted file mode 100644
index 53735b1d1b..0000000000
--- a/arch/ia64/kernel/traps.c
+++ /dev/null
@@ -1,612 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific trap handling.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-#include <linux/export.h>
-#include <linux/extable.h>
-#include <linux/hardirq.h>
-#include <linux/kprobes.h>
-#include <linux/delay.h> /* for ssleep() */
-#include <linux/kdebug.h>
-#include <linux/uaccess.h>
-
-#include <asm/fpswa.h>
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-#include <asm/exception.h>
-#include <asm/setup.h>
-
-fpswa_interface_t *fpswa_interface;
-EXPORT_SYMBOL(fpswa_interface);
-
-void __init
-trap_init (void)
-{
- if (ia64_boot_param->fpswa)
- /* FPSWA fixup: make the interface pointer a kernel virtual address: */
- fpswa_interface = __va(ia64_boot_param->fpswa);
-}
-
-int
-die (const char *str, struct pt_regs *regs, long err)
-{
- static struct {
- spinlock_t lock;
- u32 lock_owner;
- int lock_owner_depth;
- } die = {
- .lock = __SPIN_LOCK_UNLOCKED(die.lock),
- .lock_owner = -1,
- .lock_owner_depth = 0
- };
- static int die_counter;
- int cpu = get_cpu();
-
- if (die.lock_owner != cpu) {
- console_verbose();
- spin_lock_irq(&die.lock);
- die.lock_owner = cpu;
- die.lock_owner_depth = 0;
- bust_spinlocks(1);
- }
- put_cpu();
-
- if (++die.lock_owner_depth < 3) {
- printk("%s[%d]: %s %ld [%d]\n",
- current->comm, task_pid_nr(current), str, err, ++die_counter);
- if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
- != NOTIFY_STOP)
- show_regs(regs);
- else
- regs = NULL;
- } else
- printk(KERN_ERR "Recursive die() failure, output suppressed\n");
-
- bust_spinlocks(0);
- die.lock_owner = -1;
- add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irq(&die.lock);
-
- if (!regs)
- return 1;
-
- if (panic_on_oops)
- panic("Fatal exception");
-
- make_task_dead(SIGSEGV);
- return 0;
-}
-
-int
-die_if_kernel (char *str, struct pt_regs *regs, long err)
-{
- if (!user_mode(regs))
- return die(str, regs, err);
- return 0;
-}
-
-void
-__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
-{
- int sig, code;
-
- switch (break_num) {
- case 0: /* unknown error (used by GCC for __builtin_abort()) */
- if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
- == NOTIFY_STOP)
- return;
- if (die_if_kernel("bugcheck!", regs, break_num))
- return;
- sig = SIGILL; code = ILL_ILLOPC;
- break;
-
- case 1: /* integer divide by zero */
- sig = SIGFPE; code = FPE_INTDIV;
- break;
-
- case 2: /* integer overflow */
- sig = SIGFPE; code = FPE_INTOVF;
- break;
-
- case 3: /* range check/bounds check */
- sig = SIGFPE; code = FPE_FLTSUB;
- break;
-
- case 4: /* null pointer dereference */
- sig = SIGSEGV; code = SEGV_MAPERR;
- break;
-
- case 5: /* misaligned data */
- sig = SIGSEGV; code = BUS_ADRALN;
- break;
-
- case 6: /* decimal overflow */
- sig = SIGFPE; code = __FPE_DECOVF;
- break;
-
- case 7: /* decimal divide by zero */
- sig = SIGFPE; code = __FPE_DECDIV;
- break;
-
- case 8: /* packed decimal error */
- sig = SIGFPE; code = __FPE_DECERR;
- break;
-
- case 9: /* invalid ASCII digit */
- sig = SIGFPE; code = __FPE_INVASC;
- break;
-
- case 10: /* invalid decimal digit */
- sig = SIGFPE; code = __FPE_INVDEC;
- break;
-
- case 11: /* paragraph stack overflow */
- sig = SIGSEGV; code = __SEGV_PSTKOVF;
- break;
-
- case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
- sig = SIGILL; code = __ILL_BNDMOD;
- break;
-
- default:
- if ((break_num < 0x40000 || break_num > 0x100000)
- && die_if_kernel("Bad break", regs, break_num))
- return;
-
- if (break_num < 0x80000) {
- sig = SIGILL; code = __ILL_BREAK;
- } else {
- if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
- == NOTIFY_STOP)
- return;
- sig = SIGTRAP; code = TRAP_BRKPT;
- }
- }
- force_sig_fault(sig, code,
- (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
- break_num, 0 /* clear __ISR_VALID */, 0);
-}
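-
-/*
- * Summary of the default case above: in kernel mode, break numbers
- * outside the 0x40000-0x100000 window oops via die_if_kernel(); in
- * user mode, numbers below 0x80000 raise SIGILL and the rest raise
- * SIGTRAP.
- */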
-
-/*
- * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
- * and it doesn't own the fp-high register partition. When this happens, we save the
- * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
- * the fp-high partition of the current task (if necessary). Note that the kernel has
- * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
- * care of clearing psr.dfh.
- */
-static inline void
-disabled_fph_fault (struct pt_regs *regs)
-{
- struct ia64_psr *psr = ia64_psr(regs);
-
- /* first, grant user-level access to fph partition: */
- psr->dfh = 0;
-
- /*
- * Make sure that no other task gets in on this processor
- * while we're claiming the FPU
- */
- preempt_disable();
-#ifndef CONFIG_SMP
- {
- struct task_struct *fpu_owner
- = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
-
- if (ia64_is_local_fpu_owner(current)) {
- preempt_enable_no_resched();
- return;
- }
-
- if (fpu_owner)
- ia64_flush_fph(fpu_owner);
- }
-#endif /* !CONFIG_SMP */
- ia64_set_local_fpu_owner(current);
- if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
- __ia64_load_fpu(current->thread.fph);
- psr->mfh = 0;
- } else {
- __ia64_init_fpu();
- /*
- * Set mfh because the state in thread.fph does not match the state in
- * the fph partition.
- */
- psr->mfh = 1;
- }
- preempt_enable_no_resched();
-}
-
-static inline int
-fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
- struct pt_regs *regs)
-{
- fp_state_t fp_state;
- fpswa_ret_t ret;
-
- if (!fpswa_interface)
- return -1;
-
- memset(&fp_state, 0, sizeof(fp_state_t));
-
- /*
- * compute fp_state. only FP registers f6 - f11 are used by the
- * kernel, so set those bits in the mask and set the low volatile
- * pointer to point to these registers.
- */
- fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
-
- fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
- /*
- * unsigned long (*EFI_FPSWA) (
- * unsigned long trap_type,
- * void *Bundle,
- * unsigned long *pipsr,
- * unsigned long *pfsr,
- * unsigned long *pisr,
- * unsigned long *ppreds,
- * unsigned long *pifs,
- * void *fp_state);
- */
- ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
- (unsigned long *) ipsr, (unsigned long *) fpsr,
- (unsigned long *) isr, (unsigned long *) pr,
- (unsigned long *) ifs, &fp_state);
-
- return ret.status;
-}
-
-struct fpu_swa_msg {
- unsigned long count;
- unsigned long time;
-};
-static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
-DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
-static struct fpu_swa_msg last __cacheline_aligned;
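-
-/*
- * Two-level throttle for the "floating-point assist" message below:
- * each CPU may print at most five faults per 5-second window (the
- * per-cpu 'cpulast'), and all CPUs combined are limited to five per
- * window via 'last', whose low 4 bits count messages and whose upper
- * bits act as a sequence number bumped when the count is reset.
- */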
-
-
-/*
- * Handle floating-point assist faults and traps.
- */
-static int
-handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
-{
- long exception, bundle[2];
- unsigned long fault_ip;
-
- fault_ip = regs->cr_iip;
- if (!fp_fault && (ia64_psr(regs)->ri == 0))
- fault_ip -= 16;
- if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
- return -1;
-
- if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
- unsigned long count, current_jiffies = jiffies;
- struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast);
-
- if (unlikely(current_jiffies > cp->time))
- cp->count = 0;
- if (unlikely(cp->count < 5)) {
- cp->count++;
- cp->time = current_jiffies + 5 * HZ;
-
- /* minimize races by grabbing a copy of count BEFORE checking last.time. */
- count = last.count;
- barrier();
-
- /*
- * Lower 4 bits are used as a count. Upper bits are a sequence
- * number that is updated when count is reset.  The cmpxchg will
- * fail if seqno has changed, which minimizes multiple CPUs
- * resetting the count.
- */
- if (current_jiffies > last.time)
- (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
-
- /* use fetchadd to atomically update the count */
- if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
- last.time = current_jiffies + 5 * HZ;
- printk(KERN_WARNING
- "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
- current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
- }
- }
- }
-
- exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
- &regs->cr_ifs, regs);
- if (fp_fault) {
- if (exception == 0) {
- /* emulation was successful */
- ia64_increment_ip(regs);
- } else if (exception == -1) {
- printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
- return -1;
- } else {
- /* is next instruction a trap? */
- int si_code;
-
- if (exception & 2) {
- ia64_increment_ip(regs);
- }
- si_code = FPE_FLTUNK; /* default code */
- if (isr & 0x11) {
- si_code = FPE_FLTINV;
- } else if (isr & 0x22) {
- /* denormal operand gets the same si_code as underflow
- * see arch/i386/kernel/traps.c:math_error() */
- si_code = FPE_FLTUND;
- } else if (isr & 0x44) {
- si_code = FPE_FLTDIV;
- }
- force_sig_fault(SIGFPE, si_code,
- (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
- 0, __ISR_VALID, isr);
- }
- } else {
- if (exception == -1) {
- printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
- return -1;
- } else if (exception != 0) {
- /* raise exception */
- int si_code;
-
- si_code = FPE_FLTUNK; /* default code */
- if (isr & 0x880) {
- si_code = FPE_FLTOVF;
- } else if (isr & 0x1100) {
- si_code = FPE_FLTUND;
- } else if (isr & 0x2200) {
- si_code = FPE_FLTRES;
- }
- force_sig_fault(SIGFPE, si_code,
- (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
- 0, __ISR_VALID, isr);
- }
- }
- return 0;
-}
-
-struct illegal_op_return {
- unsigned long fkt, arg1, arg2, arg3;
-};
-
-struct illegal_op_return
-ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
- long arg4, long arg5, long arg6, long arg7,
- struct pt_regs regs)
-{
- struct illegal_op_return rv;
- char buf[128];
-
-#ifdef CONFIG_IA64_BRL_EMU
- {
- extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
-
- rv = ia64_emulate_brl(&regs, ec);
- if (rv.fkt != (unsigned long) -1)
- return rv;
- }
-#endif
-
- sprintf(buf, "IA-64 Illegal operation fault");
- rv.fkt = 0;
- if (die_if_kernel(buf, &regs, 0))
- return rv;
-
- force_sig_fault(SIGILL, ILL_ILLOPC,
- (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri),
- 0, 0, 0);
- return rv;
-}
-
-void __kprobes
-ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
- unsigned long iim, unsigned long itir, long arg5, long arg6,
- long arg7, struct pt_regs regs)
-{
- unsigned long code, error = isr, iip;
- char buf[128];
- int result, sig, si_code;
- static const char *reason[] = {
- "IA-64 Illegal Operation fault",
- "IA-64 Privileged Operation fault",
- "IA-64 Privileged Register fault",
- "IA-64 Reserved Register/Field fault",
- "Disabled Instruction Set Transition fault",
- "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
- "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
- "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
- };
-
- if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
- /*
- * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
- * the lfetch.
- */
- ia64_psr(&regs)->ed = 1;
- return;
- }
-
- iip = regs.cr_iip + ia64_psr(&regs)->ri;
-
- switch (vector) {
- case 24: /* General Exception */
- code = (isr >> 4) & 0xf;
- sprintf(buf, "General Exception: %s%s", reason[code],
- (code == 3) ? ((isr & (1UL << 37))
- ? " (RSE access)" : " (data access)") : "");
- if (code == 8) {
-# ifdef CONFIG_IA64_PRINT_HAZARDS
- printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
- current->comm, task_pid_nr(current),
- regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
-# endif
- return;
- }
- break;
-
- case 25: /* Disabled FP-Register */
- if (isr & 2) {
- disabled_fph_fault(&regs);
- return;
- }
- sprintf(buf, "Disabled FPL fault---not supposed to happen!");
- break;
-
- case 26: /* NaT Consumption */
- if (user_mode(&regs)) {
- void __user *addr;
-
- if (((isr >> 4) & 0xf) == 2) {
- /* NaT page consumption */
- sig = SIGSEGV;
- code = SEGV_ACCERR;
- addr = (void __user *) ifa;
- } else {
- /* register NaT consumption */
- sig = SIGILL;
- code = ILL_ILLOPN;
- addr = (void __user *) (regs.cr_iip
- + ia64_psr(&regs)->ri);
- }
- force_sig_fault(sig, code, addr,
- vector, __ISR_VALID, isr);
- return;
- } else if (ia64_done_with_exception(&regs))
- return;
- sprintf(buf, "NaT consumption");
- break;
-
- case 31: /* Unsupported Data Reference */
- if (user_mode(&regs)) {
- force_sig_fault(SIGILL, ILL_ILLOPN, (void __user *) iip,
- vector, __ISR_VALID, isr);
- return;
- }
- sprintf(buf, "Unsupported data reference");
- break;
-
- case 29: /* Debug */
- case 35: /* Taken Branch Trap */
- case 36: /* Single Step Trap */
- if (fsys_mode(current, &regs)) {
- extern char __kernel_syscall_via_break[];
- /*
- * Got a trap in fsys-mode: Taken Branch Trap
- * and Single Step trap need special handling;
- * Debug trap is ignored (we disable it here
- * and re-enable it in the lower-privilege trap).
- */
- if (unlikely(vector == 29)) {
- set_thread_flag(TIF_DB_DISABLED);
- ia64_psr(&regs)->db = 0;
- ia64_psr(&regs)->lp = 1;
- return;
- }
- /* re-do the system call via break 0x100000: */
- regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
- ia64_psr(&regs)->ri = 0;
- ia64_psr(&regs)->cpl = 3;
- return;
- }
- switch (vector) {
- default:
- case 29:
- si_code = TRAP_HWBKPT;
-#ifdef CONFIG_ITANIUM
- /*
- * Erratum 10 (IFA may contain incorrect address) now has
- * "NoFix" status. There are no plans for fixing this.
- */
- if (ia64_psr(&regs)->is == 0)
- ifa = regs.cr_iip;
-#endif
- break;
- case 35: si_code = TRAP_BRANCH; ifa = 0; break;
- case 36: si_code = TRAP_TRACE; ifa = 0; break;
- }
- if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, si_code, SIGTRAP)
- == NOTIFY_STOP)
- return;
- force_sig_fault(SIGTRAP, si_code, (void __user *) ifa,
- 0, __ISR_VALID, isr);
- return;
-
- case 32: /* fp fault */
- case 33: /* fp trap */
- result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
- if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
- force_sig_fault(SIGFPE, FPE_FLTINV, (void __user *) iip,
- 0, __ISR_VALID, isr);
- }
- return;
-
- case 34:
- if (isr & 0x2) {
- /* Lower-Privilege Transfer Trap */
-
- /* If we disabled debug traps during an fsyscall,
- * re-enable them here.
- */
- if (test_thread_flag(TIF_DB_DISABLED)) {
- clear_thread_flag(TIF_DB_DISABLED);
- ia64_psr(&regs)->db = 1;
- }
-
- /*
- * Just clear PSR.lp and then return immediately:
- * all the interesting work (e.g., signal delivery)
- * is done in the kernel exit path.
- */
- ia64_psr(&regs)->lp = 0;
- return;
- } else {
- /* Unimplemented Instr. Address Trap */
- if (user_mode(&regs)) {
- force_sig_fault(SIGILL, ILL_BADIADDR,
- (void __user *) iip,
- 0, 0, 0);
- return;
- }
- sprintf(buf, "Unimplemented Instruction Address fault");
- }
- break;
-
- case 45:
- printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
- printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
- iip, ifa, isr);
- force_sig(SIGSEGV);
- return;
-
- case 46:
- printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
- printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
- iip, ifa, isr, iim);
- force_sig(SIGSEGV);
- return;
-
- case 47:
- sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
- break;
-
- default:
- sprintf(buf, "Fault %lu", vector);
- break;
- }
- if (!die_if_kernel(buf, &regs, error))
- force_sig(SIGILL);
-}
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
deleted file mode 100644
index 0acb5a0cd7..0000000000
--- a/arch/ia64/kernel/unaligned.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific unaligned trap handling.
- *
- * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 2002/12/09 Fix rotating register handling (off-by-1 error, missing fr-rotation). Fix
- * get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
- * stacked register returns an undefined value; it does NOT trigger a
- * "rsvd register fault").
- * 2001/10/11 Fix unaligned access to rotating registers in s/w pipelined loops.
- * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes.
- * 2001/01/17 Add support emulation of unaligned kernel accesses.
- */
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/tty.h>
-#include <linux/extable.h>
-#include <linux/ratelimit.h>
-#include <linux/uaccess.h>
-
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-#include <asm/rse.h>
-#include <asm/exception.h>
-#include <asm/unaligned.h>
-
-extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
-
-#undef DEBUG_UNALIGNED_TRAP
-
-#ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
-# define DDUMP(str,vp,len) dump(str, vp, len)
-
-static void
-dump (const char *str, void *vp, size_t len)
-{
- unsigned char *cp = vp;
- int i;
-
- printk("%s", str);
- for (i = 0; i < len; ++i)
- printk (" %02x", *cp++);
- printk("\n");
-}
-#else
-# define DPRINT(a...)
-# define DDUMP(str,vp,len)
-#endif
-
-#define IA64_FIRST_STACKED_GR 32
-#define IA64_FIRST_ROTATING_FR 32
-#define SIGN_EXT9 0xffffffffffffff00ul
-
-/*
- * sysctl settable hook which tells the kernel whether to honor the
- * IA64_THREAD_UAC_NOPRINT prctl. Because this is user settable, we want
- * to allow the superuser to enable/disable this for security reasons
- * (i.e. don't allow an attacker to fill up the logs with unaligned accesses).
- */
-int no_unaligned_warning;
-int unaligned_dump_stack;
-
-/*
- * For M-unit:
- *
- * opcode | m | x6 |
- * --------|------|---------|
- * [40-37] | [36] | [35:30] |
- * --------|------|---------|
- * 4 | 1 | 6 | = 11 bits
- * --------------------------
- * However bits [31:30] are not directly useful to distinguish between
- * load/store so we can use [35:32] instead, which gives the following
- * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
- * checking the m-bit until later in the load/store emulation.
- */
-#define IA64_OPCODE_MASK 0x1ef
-#define IA64_OPCODE_SHIFT 32
-
-/*
- * Table C-28 Integer Load/Store
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill MUST be aligned because the RNATs are based on
- * the address (bits [8:3]), so we must fail.
- */
-#define LD_OP 0x080
-#define LDS_OP 0x081
-#define LDA_OP 0x082
-#define LDSA_OP 0x083
-#define LDBIAS_OP 0x084
-#define LDACQ_OP 0x085
-/* 0x086, 0x087 are not relevant */
-#define LDCCLR_OP 0x088
-#define LDCNC_OP 0x089
-#define LDCCLRACQ_OP 0x08a
-#define ST_OP 0x08c
-#define STREL_OP 0x08d
-/* 0x08e,0x8f are not relevant */
-
-/*
- * Table C-29 Integer Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-30 Integer Load/Store +Imm
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill must be aligned because the NaT bits are based
- * on the address, so we must fail and the program must be fixed.
- */
-#define LD_IMM_OP 0x0a0
-#define LDS_IMM_OP 0x0a1
-#define LDA_IMM_OP 0x0a2
-#define LDSA_IMM_OP 0x0a3
-#define LDBIAS_IMM_OP 0x0a4
-#define LDACQ_IMM_OP 0x0a5
-/* 0x0a6, 0xa7 are not relevant */
-#define LDCCLR_IMM_OP 0x0a8
-#define LDCNC_IMM_OP 0x0a9
-#define LDCCLRACQ_IMM_OP 0x0aa
-#define ST_IMM_OP 0x0ac
-#define STREL_IMM_OP 0x0ad
-/* 0x0ae,0xaf are not relevant */
-
-/*
- * Table C-32 Floating-point Load/Store
- */
-#define LDF_OP 0x0c0
-#define LDFS_OP 0x0c1
-#define LDFA_OP 0x0c2
-#define LDFSA_OP 0x0c3
-/* 0x0c6 is irrelevant */
-#define LDFCCLR_OP 0x0c8
-#define LDFCNC_OP 0x0c9
-/* 0x0cb is irrelevant */
-#define STF_OP 0x0cc
-
-/*
- * Table C-33 Floating-point Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-34 Floating-point Load/Store +Imm
- */
-#define LDF_IMM_OP 0x0e0
-#define LDFS_IMM_OP 0x0e1
-#define LDFA_IMM_OP 0x0e2
-#define LDFSA_IMM_OP 0x0e3
-/* 0x0e6 is irrelevant */
-#define LDFCCLR_IMM_OP 0x0e8
-#define LDFCNC_IMM_OP 0x0e9
-#define STF_IMM_OP 0x0ec
-
-typedef struct {
- unsigned long qp:6; /* [0:5] */
- unsigned long r1:7; /* [6:12] */
- unsigned long imm:7; /* [13:19] */
- unsigned long r3:7; /* [20:26] */
- unsigned long x:1; /* [27:27] */
- unsigned long hint:2; /* [28:29] */
- unsigned long x6_sz:2; /* [30:31] */
- unsigned long x6_op:4; /* [32:35], x6 = x6_sz|x6_op */
- unsigned long m:1; /* [36:36] */
- unsigned long op:4; /* [37:40] */
- unsigned long pad:23; /* [41:63] */
-} load_store_t;
-
-
-typedef enum {
- UPD_IMMEDIATE, /* ldXZ r1=[r3],imm(9) */
- UPD_REG /* ldXZ r1=[r3],r2 */
-} update_t;
-
-/*
- * We use tables to keep track of the offsets of registers in the saved state.
- * This way we save having big switch/case statements.
- *
- * We use bit 0 to indicate switch_stack or pt_regs.
- * The offset is simply shifted by 1 bit.
- * A 2-byte value should be enough to hold any kind of offset
- *
- * In case the calling convention changes (and thus pt_regs/switch_stack)
- * simply use RSW instead of RPT or vice-versa.
- */
-
-#define RPO(x) ((size_t) &((struct pt_regs *)0)->x)
-#define RSO(x) ((size_t) &((struct switch_stack *)0)->x)
-
-#define RPT(x) (RPO(x) << 1)
-#define RSW(x) (1| RSO(x)<<1)
-
-#define GR_OFFS(x) (gr_info[x]>>1)
-#define GR_IN_SW(x) (gr_info[x] & 0x1)
-
-#define FR_OFFS(x) (fr_info[x]>>1)
-#define FR_IN_SW(x) (fr_info[x] & 0x1)
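-
-/*
- * Example: r4 is callee-saved and lives in struct switch_stack, so
- * gr_info[4] = RSW(r4) has bit 0 set (GR_IN_SW(4) == 1) and
- * GR_OFFS(4) is the offset of r4 within switch_stack; a scratch
- * register such as r8 uses RPT() and resolves into pt_regs instead.
- */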
-
-static u16 gr_info[32]={
- 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
-
- RPT(r1), RPT(r2), RPT(r3),
-
- RSW(r4), RSW(r5), RSW(r6), RSW(r7),
-
- RPT(r8), RPT(r9), RPT(r10), RPT(r11),
- RPT(r12), RPT(r13), RPT(r14), RPT(r15),
-
- RPT(r16), RPT(r17), RPT(r18), RPT(r19),
- RPT(r20), RPT(r21), RPT(r22), RPT(r23),
- RPT(r24), RPT(r25), RPT(r26), RPT(r27),
- RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-static u16 fr_info[32]={
- 0, /* constant : WE SHOULD NEVER GET THIS */
- 0, /* constant : WE SHOULD NEVER GET THIS */
-
- RSW(f2), RSW(f3), RSW(f4), RSW(f5),
-
- RPT(f6), RPT(f7), RPT(f8), RPT(f9),
- RPT(f10), RPT(f11),
-
- RSW(f12), RSW(f13), RSW(f14),
- RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
- RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
- RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
- RSW(f30), RSW(f31)
-};
-
-/* Invalidate ALAT entry for integer register REGNO. */
-static void
-invala_gr (int regno)
-{
-# define F(reg) case reg: ia64_invala_gr(reg); break
-
- switch (regno) {
- F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
- F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
- F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
- F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
- F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
- F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
- F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
- F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
- F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
- F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
- F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
- F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
- F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
- F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
- F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
- F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
- }
-# undef F
-}
-
-/* Invalidate ALAT entry for floating-point register REGNO. */
-static void
-invala_fr (int regno)
-{
-# define F(reg) case reg: ia64_invala_fr(reg); break
-
- switch (regno) {
- F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
- F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
- F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
- F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
- F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
- F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
- F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
- F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
- F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
- F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
- F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
- F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
- F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
- F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
- F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
- F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
- }
-# undef F
-}
-
-static inline unsigned long
-rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
-{
- reg += rrb;
- if (reg >= sor)
- reg -= sor;
- return reg;
-}
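-
-/*
- * Worked example: with a rotating region of sor = 16 registers and
- * rrb = 3, rotate_reg(16, 3, 14) returns (14 + 3) - 16 = 1, i.e. the
- * rotated index wraps around within the rotating region.
- */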
-
-static void
-set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long rnats, nat_mask;
- unsigned long on_kbs;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
- /* this should never happen, as the "rsvd register fault" has higher priority */
- DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
- return;
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
- r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
- on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
- addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
- if (addr >= kbs) {
- /* the register is on the kernel backing store: easy... */
- rnat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) rnat_addr >= sw->ar_bspstore)
- rnat_addr = &sw->ar_rnat;
- nat_mask = 1UL << ia64_rse_slot_num(addr);
-
- *addr = val;
- if (nat)
- *rnat_addr |= nat_mask;
- else
- *rnat_addr &= ~nat_mask;
- return;
- }
-
- if (!user_stack(current, regs)) {
- DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
- return;
- }
-
- bspstore = (unsigned long *)regs->ar_bspstore;
- ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
- bsp = ia64_rse_skip_regs(ubs_end, -sof);
- addr = ia64_rse_skip_regs(bsp, ridx);
-
- DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
- ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
- DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
- (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
-
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- if (nat)
- rnats |= nat_mask;
- else
- rnats &= ~nat_mask;
- ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
-
- DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-}
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long rnats, nat_mask;
- unsigned long on_kbs;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
- /* read of out-of-frame register returns an undefined value; 0 in our case. */
- DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
- goto fail;
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
- r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
- on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
- addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
- if (addr >= kbs) {
- /* the register is on the kernel backing store: easy... */
- *val = *addr;
- if (nat) {
- rnat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) rnat_addr >= sw->ar_bspstore)
- rnat_addr = &sw->ar_rnat;
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- *nat = (*rnat_addr & nat_mask) != 0;
- }
- return;
- }
-
- if (!user_stack(current, regs)) {
- DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
- goto fail;
- }
-
- bspstore = (unsigned long *)regs->ar_bspstore;
- ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
- bsp = ia64_rse_skip_regs(ubs_end, -sof);
- addr = ia64_rse_skip_regs(bsp, ridx);
-
- DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
- if (nat) {
- rnat_addr = ia64_rse_rnat_addr(addr);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
-
- DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
- *nat = (rnats & nat_mask) != 0;
- }
- return;
-
- fail:
- *val = 0;
- if (nat)
- *nat = 0;
- return;
-}
-
-
-static void
-setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr;
- unsigned long bitmask;
- unsigned long *unat;
-
- /*
- * First takes care of stacked registers
- */
- if (regnum >= IA64_FIRST_STACKED_GR) {
- set_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Using r0 as a target raises a General Exception fault which has higher priority
- * than the Unaligned Reference fault.
- */
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- if (GR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- unat = &sw->ar_unat;
- } else {
- addr = (unsigned long)regs;
- unat = &sw->caller_unat;
- }
- DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
- addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
- /*
- * add offset from base of struct
- * and do it !
- */
- addr += GR_OFFS(regnum);
-
- *(unsigned long *)addr = val;
-
- /*
- * We need to clear the corresponding UNAT bit to fully emulate the load
- * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
- */
- bitmask = 1UL << (addr >> 3 & 0x3f);
- DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
- if (nat) {
- *unat |= bitmask;
- } else {
- *unat &= ~bitmask;
- }
- DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat,*unat);
-}
-
-/*
- * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
- * range from 32-127, result is in the range from 0-95.
- */
-static inline unsigned long
-fph_index (struct pt_regs *regs, long regnum)
-{
- unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
- return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
-
-static void
-setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *)regs - 1;
- unsigned long addr;
-
- /*
- * From EAS-2.5: FPDisableFault has higher priority than Unaligned
- * Fault. Thus, when we get here, we know the partition is enabled.
- * To update f32-f127, there are three choices:
- *
- * (1) save f32-f127 to thread.fph and update the values there
- * (2) use a gigantic switch statement to directly access the registers
- * (3) generate code on the fly to update the desired register
- *
- * For now, we are using approach (1).
- */
- if (regnum >= IA64_FIRST_ROTATING_FR) {
- ia64_sync_fph(current);
- current->thread.fph[fph_index(regs, regnum)] = *fpval;
- } else {
- /*
- * pt_regs or switch_stack ?
- */
- if (FR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- } else {
- addr = (unsigned long)regs;
- }
-
- DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
-
- addr += FR_OFFS(regnum);
- *(struct ia64_fpreg *)addr = *fpval;
-
- /*
- * mark the low partition as being used now
- *
- * It is highly unlikely that this bit is not already set, but
- * let's do it for safety.
- */
- regs->cr_ipsr |= IA64_PSR_MFL;
- }
-}
-
-/*
- * Those 2 inline functions generate the spilled versions of the constant floating point
- * registers which can be used with stfX
- */
-static inline void
-float_spill_f0 (struct ia64_fpreg *final)
-{
- ia64_stf_spill(final, 0);
-}
-
-static inline void
-float_spill_f1 (struct ia64_fpreg *final)
-{
- ia64_stf_spill(final, 1);
-}
-
-static void
-getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr;
-
- /*
- * From EAS-2.5: FPDisableFault has higher priority than
- * Unaligned Fault. Thus, when we get here, we know the partition is
- * enabled.
- *
- * When regnum > 31, the register is still live and we need to force a save
- * to current->thread.fph to get access to it. See discussion in setfpreg()
- * for reasons and other ways of doing this.
- */
- if (regnum >= IA64_FIRST_ROTATING_FR) {
- ia64_flush_fph(current);
- *fpval = current->thread.fph[fph_index(regs, regnum)];
- } else {
- /*
- * f0 = 0.0, f1 = 1.0.  Those registers are constant and thus not
- * saved; we must generate their spilled form on the fly.
- */
- switch (regnum) {
- case 0:
- float_spill_f0(fpval);
- break;
- case 1:
- float_spill_f1(fpval);
- break;
- default:
- /*
- * pt_regs or switch_stack ?
- */
- addr = FR_IN_SW(regnum) ? (unsigned long)sw
- : (unsigned long)regs;
-
- DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
- FR_IN_SW(regnum), addr, FR_OFFS(regnum));
-
- addr += FR_OFFS(regnum);
- *fpval = *(struct ia64_fpreg *)addr;
- }
- }
-}
-
-
-static void
-getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr, *unat;
-
- if (regnum >= IA64_FIRST_STACKED_GR) {
- get_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * take care of r0 (read-only always evaluate to 0)
- */
- if (regnum == 0) {
- *val = 0;
- if (nat)
- *nat = 0;
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- if (GR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- unat = &sw->ar_unat;
- } else {
- addr = (unsigned long)regs;
- unat = &sw->caller_unat;
- }
-
- DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum));
-
- addr += GR_OFFS(regnum);
-
- *val = *(unsigned long *)addr;
-
- /*
- * do it only when requested
- */
- if (nat)
- *nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
-}
-
-static void
-emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
-{
- /*
- * IMPORTANT:
- * Given the way we handle unaligned speculative loads, we should
- * not get to this point in the code but we keep this sanity check,
- * just in case.
- */
- if (ld.x6_op == 1 || ld.x6_op == 3) {
- printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
- if (die_if_kernel("unaligned reference on speculative load with register update\n",
- regs, 30))
- return;
- }
-
-
- /*
- * at this point, we know that the base register to update is valid i.e.,
- * it's not r0
- */
- if (type == UPD_IMMEDIATE) {
- unsigned long imm;
-
- /*
- * Load +Imm: ldXZ r1=[r3],imm(9)
- *
- *
- * form imm9: [13:19] contain the first 7 bits
- */
- imm = ld.x << 7 | ld.imm;
-
- /*
- * sign extend (1+8bits) if m set
- */
- if (ld.m) imm |= SIGN_EXT9;
-
- /*
- * ifa == r3 and we know that the NaT bit on r3 was clear so
- * we can directly use ifa.
- */
- ifa += imm;
-
- setreg(ld.r3, ifa, 0, regs);
-
- DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
-
- } else if (ld.m) {
- unsigned long r2;
- int nat_r2;
-
- /*
- * Load +Reg Opcode: ldXZ r1=[r3],r2
- *
- * Note that we update r3 even in the case of ldfX.a
- * (where the load does not happen)
- *
- * The way the load algorithm works, we know that r3 does not
- * have its NaT bit set (would have gotten NaT consumption
- * before getting the unaligned fault). So we can use ifa
- * which equals r3 at this point.
- *
- * IMPORTANT:
- * The above statement holds ONLY because we know that we
- * never reach this code when trying to do a ldX.s.
- * If we ever make it to here on an ldfX.s then this assumption no longer holds.
- */
- getreg(ld.imm, &r2, &nat_r2, regs);
-
- ifa += r2;
-
- /*
- * propagate Nat r2 -> r3
- */
- setreg(ld.r3, ifa, nat_r2, regs);
-
- DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n",ld.imm, r2, ifa, nat_r2);
- }
-}
-
-static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
-{
- if (kernel_mode)
- return copy_to_kernel_nofault((void *)ifa, val, len);
-
- return copy_to_user((void __user *)ifa, val, len);
-}
-
-static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
-{
- if (kernel_mode)
- return copy_from_kernel_nofault(val, (void *)ifa, len);
-
- return copy_from_user(val, (void __user *)ifa, len);
-}
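-
-/*
- * The _nofault accessors used for kernel_mode accesses mean that a
- * bad kernel address makes the emulation fail gracefully (nonzero
- * return) instead of recursing into the fault handlers.
- */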
-
-static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
- bool kernel_mode)
-{
- unsigned int len = 1 << ld.x6_sz;
- unsigned long val = 0;
-
- /*
- * r0, as target, doesn't need to be checked because Illegal Instruction
- * faults have higher priority than unaligned faults.
- *
- * r0 cannot be found as the base as it would never generate an
- * unaligned reference.
- */
-
- /*
- * For ldX.a we will emulate the load and also invalidate the ALAT entry.
- * See the comment below for an explanation of how we handle ldX.a.
- */
-
- if (len != 2 && len != 4 && len != 8) {
- DPRINT("unknown size: x6=%d\n", ld.x6_sz);
- return -1;
- }
- /* this assumes little-endian byte-order: */
- if (emulate_load(&val, ifa, len, kernel_mode))
- return -1;
- setreg(ld.r1, val, 0, regs);
-
- /*
- * check for updates on any kind of loads
- */
- if (ld.op == 0x5 || ld.m)
- emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
- /*
- * handling of various loads (based on EAS2.4):
- *
- * ldX.acq (ordered load):
- * - acquire semantics would have been used, so force fence instead.
- *
- * ldX.c.clr (check load and clear):
- * - if we get to this handler, it's because the entry was not in the ALAT.
- * Therefore the operation reverts to a normal load
- *
- * ldX.c.nc (check load no clear):
- * - same as previous one
- *
- * ldX.c.clr.acq (ordered check load and clear):
- * - same as above for c.clr part. The load needs to have acquire semantics. So
- * we use the fence semantics which is stronger and thus ensures correctness.
- *
- * ldX.a (advanced load):
- * - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
- * address doesn't match requested size alignment. This means that we would
- * possibly need more than one load to get the result.
- *
- * The load part can be handled just like a normal load, however the difficult
- * part is to get the right thing into the ALAT. The critical piece of information
- * in the base address of the load & size. To do that, a ld.a must be executed,
- * clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
- * if we use the same target register, we will be okay for the check.a instruction.
- * If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
- * which would overlap within [r3,r3+X] (the size of the load was stored in the
- * ALAT). If such an entry is found the entry is invalidated. But this is not good
- * enough, take the following example:
- * r3=3
- * ld4.a r1=[r3]
- *
- * Could be emulated by doing:
- * ld1.a r1=[r3],1
- * store to temporary;
- * ld1.a r1=[r3],1
- * store & shift to temporary;
- * ld1.a r1=[r3],1
- * store & shift to temporary;
- * ld1.a r1=[r3]
- * store & shift to temporary;
- * r1=temporary
- *
- * So in this case, you would get the right value in r1 but the wrong info in
- * the ALAT. Notice that you could do it in reverse to finish with address 3
- * but you would still get the size wrong. To get the size right, one needs to
- * execute exactly the same kind of load. You could do it from an aligned
- * temporary location, but you would get the address wrong.
- *
- * So no matter what, it is not possible to emulate an advanced load
- * correctly. But is that really critical?
- *
- * We will always convert ld.a into a normal load with the ALAT invalidated. This
- * enables the compiler to optimize code paths after ld.a that do not
- * need a ld.c/chk.a, e.g., code paths with no intervening stores.
- *
- * If there is a store after the advanced load, one must either do a ld.c.* or
- * chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
- * entry found in ALAT), and that's perfectly ok because:
- *
- * - ld.c.*, if the entry is not present a normal load is executed
- * - chk.a.*, if the entry is not present, execution jumps to recovery code
- *
- * In either case, the load can be potentially retried in another form.
- *
- * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
- * up a stale entry later). The register base update MUST also be performed.
- */
-
- /*
- * when the load has the .acq completer then
- * use ordering fence.
- */
- if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
- mb();
-
- /*
- * invalidate ALAT entry in case of advanced load
- */
- if (ld.x6_op == 0x2)
- invala_gr(ld.r1);
-
- return 0;
-}
-
-static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
- bool kernel_mode)
-{
- unsigned long r2;
- unsigned int len = 1 << ld.x6_sz;
-
- /*
- * if we get to this handler, the NaT bits on both r3 and r2 have already
- * been checked, so we don't need to do it again.
- *
- * extract the value to be stored
- */
- getreg(ld.imm, &r2, NULL, regs);
-
- /*
- * we rely on the macros in unaligned.h for now i.e.,
- * we let the compiler figure out how to read memory gracefully.
- *
- * We need this switch/case because of the way the inline functions
- * work. The code is optimized by the compiler and looks like
- * a single switch/case.
- */
- DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
-
- if (len != 2 && len != 4 && len != 8) {
- DPRINT("unknown size: x6=%d\n", ld.x6_sz);
- return -1;
- }
-
- /* this assumes little-endian byte-order: */
- if (emulate_store(ifa, &r2, len, kernel_mode))
- return -1;
-
- /*
- * stX [r3]=r2,imm(9)
- *
- * NOTE:
- * ld.r3 can never be r0, because r0 would not generate an
- * unaligned access.
- */
- if (ld.op == 0x5) {
- unsigned long imm;
-
- /*
- * form imm9: [12:6] contain the first 7 bits
- */
- imm = ld.x << 7 | ld.r1;
- /*
- * sign extend (8bits) if m set
- */
- if (ld.m) imm |= SIGN_EXT9;
- /*
- * ifa == r3 (NaT is necessarily cleared)
- */
- ifa += imm;
-
- DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
- setreg(ld.r3, ifa, 0, regs);
- }
- /*
- * we don't have alat_invalidate_multiple() so we need
- * to do the complete flush :-<<
- */
- ia64_invala();
-
- /*
- * stX.rel: use fence instead of release
- */
- if (ld.x6_op == 0xd)
- mb();
-
- return 0;
-}
-
-/*
- * floating point operations sizes in bytes
- */
-static const unsigned char float_fsz[4]={
- 10, /* extended precision (e) */
- 8, /* integer (8) */
- 4, /* single precision (s) */
- 8 /* double precision (d) */
-};
-
-static inline void
-mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfe(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf8(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfs(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfd(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfe(final, 6);
-}
-
-static inline void
-float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stf8(final, 6);
-}
-
-static inline void
-float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfs(final, 6);
-}
-
-static inline void
-float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfd(final, 6);
-}
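-
-/*
- * All of the helpers above stage through f6: the mem2float_*()
- * variants load the memory-format value into f6 and spill it to
- * obtain the 16-byte register-file format, while the float2mem_*()
- * variants fill f6 from the spilled form and store it back in the
- * requested memory format.  f6 can be clobbered here because f6-f11
- * are saved in pt_regs across the trap.
- */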
-
-static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
-{
- struct ia64_fpreg fpr_init[2];
- struct ia64_fpreg fpr_final[2];
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
- * higher priority than unaligned faults.
- *
- * r0 cannot be found as the base as it would never generate an unaligned
- * reference.
- */
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init, 0, sizeof(fpr_init));
- memset(&fpr_final, 0, sizeof(fpr_final));
-
- /*
- * ldfpX.a: we don't try to emulate anything but we must
- * invalidate the ALAT entry and execute updates, if any.
- */
- if (ld.x6_op != 0x2) {
- /*
- * This assumes little-endian byte-order. Note that there is no "ldfpe"
- * instruction:
- */
- if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
- || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
- return -1;
-
- DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
- DDUMP("fpr_init =", &fpr_init, 2*len);
- /*
- * XXX fixme
- * Could optimize inlines by using ldfpX & 2 spills
- */
- switch (ld.x6_sz) {
- case 0:
- mem2float_extended(&fpr_init[0], &fpr_final[0]);
- mem2float_extended(&fpr_init[1], &fpr_final[1]);
- break;
- case 1:
- mem2float_integer(&fpr_init[0], &fpr_final[0]);
- mem2float_integer(&fpr_init[1], &fpr_final[1]);
- break;
- case 2:
- mem2float_single(&fpr_init[0], &fpr_final[0]);
- mem2float_single(&fpr_init[1], &fpr_final[1]);
- break;
- case 3:
- mem2float_double(&fpr_init[0], &fpr_final[0]);
- mem2float_double(&fpr_init[1], &fpr_final[1]);
- break;
- }
- DDUMP("fpr_final =", &fpr_final, 2*len);
- /*
- * XXX fixme
- *
- * A possible optimization would be to drop fpr_final and directly
- * use the storage from the saved context i.e., the actual final
- * destination (pt_regs, switch_stack or thread structure).
- */
- setfpreg(ld.r1, &fpr_final[0], regs);
- setfpreg(ld.imm, &fpr_final[1], regs);
- }
-
- /*
- * Check for updates: only immediate updates are available for this
- * instruction.
- */
- if (ld.m) {
- /*
- * the immediate is implicit given the ldsz of the operation:
- * single: 8 (2x4) and for all others it's 16 (2x8)
- */
- ifa += len<<1;
-
- /*
- * IMPORTANT:
- * the fact that we force the NaT of r3 to zero is ONLY valid
- * as long as we don't come here with a ldfpX.s.
- * For this reason we keep this sanity check
- */
- if (ld.x6_op == 1 || ld.x6_op == 3)
- printk(KERN_ERR "%s: register update on speculative load pair, error\n",
- __func__);
-
- setreg(ld.r3, ifa, 0, regs);
- }
-
- /*
- * Invalidate ALAT entries, if any, for both registers.
- */
- if (ld.x6_op == 0x2) {
- invala_fr(ld.r1);
- invala_fr(ld.imm);
- }
- return 0;
-}
-
-
-static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
- bool kernel_mode)
-{
- struct ia64_fpreg fpr_init;
- struct ia64_fpreg fpr_final;
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * fr0 & fr1 don't need to be checked because Illegal Instruction
- * faults have higher priority than unaligned faults.
- *
-	 * r0 can never be the base register, as it would never generate an
-	 * unaligned reference.
- */
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init,0, sizeof(fpr_init));
- memset(&fpr_final,0, sizeof(fpr_final));
-
- /*
-	 * ldfX.a: we don't try to emulate anything but we must
- * invalidate the ALAT entry.
- * See comments in ldX for descriptions on how the various loads are handled.
- */
- if (ld.x6_op != 0x2) {
- if (emulate_load(&fpr_init, ifa, len, kernel_mode))
- return -1;
-
- DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
- DDUMP("fpr_init =", &fpr_init, len);
- /*
- * we only do something for x6_op={0,8,9}
- */
- switch( ld.x6_sz ) {
- case 0:
- mem2float_extended(&fpr_init, &fpr_final);
- break;
- case 1:
- mem2float_integer(&fpr_init, &fpr_final);
- break;
- case 2:
- mem2float_single(&fpr_init, &fpr_final);
- break;
- case 3:
- mem2float_double(&fpr_init, &fpr_final);
- break;
- }
- DDUMP("fpr_final =", &fpr_final, len);
- /*
- * XXX fixme
- *
- * A possible optimization would be to drop fpr_final and directly
- * use the storage from the saved context i.e., the actual final
- * destination (pt_regs, switch_stack or thread structure).
- */
- setfpreg(ld.r1, &fpr_final, regs);
- }
-
- /*
- * check for updates on any loads
- */
- if (ld.op == 0x7 || ld.m)
- emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
- /*
- * invalidate ALAT entry in case of advanced floating point loads
- */
- if (ld.x6_op == 0x2)
- invala_fr(ld.r1);
-
- return 0;
-}
-
-
-static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
- bool kernel_mode)
-{
- struct ia64_fpreg fpr_init;
- struct ia64_fpreg fpr_final;
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init,0, sizeof(fpr_init));
- memset(&fpr_final,0, sizeof(fpr_final));
-
- /*
-	 * if we get to this handler, the NaT bits on both r3 and r2 have already
-	 * been checked, so we don't need to do it again
- *
- * extract the value to be stored
- */
- getfpreg(ld.imm, &fpr_init, regs);
- /*
- * during this step, we extract the spilled registers from the saved
-	 * context, i.e., we refill. Then we store (no spill) to a temporary
-	 * aligned location
- */
- switch( ld.x6_sz ) {
- case 0:
- float2mem_extended(&fpr_init, &fpr_final);
- break;
- case 1:
- float2mem_integer(&fpr_init, &fpr_final);
- break;
- case 2:
- float2mem_single(&fpr_init, &fpr_final);
- break;
- case 3:
- float2mem_double(&fpr_init, &fpr_final);
- break;
- }
- DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
- DDUMP("fpr_init =", &fpr_init, len);
- DDUMP("fpr_final =", &fpr_final, len);
-
- if (emulate_store(ifa, &fpr_final, len, kernel_mode))
- return -1;
-
- /*
- * stfX [r3]=r2,imm(9)
- *
- * NOTE:
- * ld.r3 can never be r0, because r0 would not generate an
- * unaligned access.
- */
- if (ld.op == 0x7) {
- unsigned long imm;
-
- /*
- * form imm9: [12:6] contain first 7bits
- */
- imm = ld.x << 7 | ld.r1;
- /*
- * sign extend (8bits) if m set
- */
- if (ld.m)
- imm |= SIGN_EXT9;
- /*
- * ifa == r3 (NaT is necessarily cleared)
- */
- ifa += imm;
-
- DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
- setreg(ld.r3, ifa, 0, regs);
- }
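-	/*
-	 * Worked example of the imm9 assembly above (assuming SIGN_EXT9
-	 * sign-extends from bit 8): for a post-increment of -16 the encoded
-	 * fields would be m=1, x=1, r1=0x70, so imm = (1 << 7) | 0x70 = 0xf0,
-	 * and the sign extension then yields -16.
-	 */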
- /*
- * we don't have alat_invalidate_multiple() so we need
- * to do the complete flush :-<<
- */
- ia64_invala();
-
- return 0;
-}
-
-/*
- * Make sure we log the unaligned access, so that user/sysadmin can notice it and
- * eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies.
- */
-static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
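-/* i.e. at most 5 messages in any 5*HZ-jiffies (5 second) window */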
-
-void
-ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
-{
- struct ia64_psr *ipsr = ia64_psr(regs);
- unsigned long bundle[2];
- unsigned long opcode;
- const struct exception_table_entry *eh = NULL;
- union {
- unsigned long l;
- load_store_t insn;
- } u;
- int ret = -1;
- bool kernel_mode = false;
-
- if (ia64_psr(regs)->be) {
- /* we don't support big-endian accesses */
- if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0))
- return;
- goto force_sigbus;
- }
-
- /*
- * Treat kernel accesses for which there is an exception handler entry the same as
- * user-level unaligned accesses. Otherwise, a clever program could trick this
-	 * handler into reading arbitrary kernel addresses...
- */
- if (!user_mode(regs))
- eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
- if (user_mode(regs) || eh) {
- if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
- goto force_sigbus;
-
- if (!no_unaligned_warning &&
- !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
- __ratelimit(&logging_rate_limit))
- {
- char buf[200]; /* comm[] is at most 16 bytes... */
- size_t len;
-
- len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
- "ip=0x%016lx\n\r", current->comm,
- task_pid_nr(current),
- ifa, regs->cr_iip + ipsr->ri);
- /*
- * Don't call tty_write_message() if we're in the kernel; we might
- * be holding locks...
- */
- if (user_mode(regs)) {
- struct tty_struct *tty = get_current_tty();
- tty_write_message(tty, buf);
- tty_kref_put(tty);
- }
- buf[len-1] = '\0'; /* drop '\r' */
- /* watch for command names containing %s */
- printk(KERN_WARNING "%s", buf);
- } else {
- if (no_unaligned_warning) {
- printk_once(KERN_WARNING "%s(%d) encountered an "
- "unaligned exception which required\n"
- "kernel assistance, which degrades "
- "the performance of the application.\n"
- "Unaligned exception warnings have "
- "been disabled by the system "
- "administrator\n"
- "echo 0 > /proc/sys/kernel/ignore-"
- "unaligned-usertrap to re-enable\n",
- current->comm, task_pid_nr(current));
- }
- }
- } else {
- if (__ratelimit(&logging_rate_limit)) {
- printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
- ifa, regs->cr_iip + ipsr->ri);
- if (unaligned_dump_stack)
- dump_stack();
- }
- kernel_mode = true;
- }
-
- DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
- regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
-
- if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
- goto failure;
-
- /*
- * extract the instruction from the bundle given the slot number
- */
- switch (ipsr->ri) {
- default:
- case 0: u.l = (bundle[0] >> 5); break;
- case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
- case 2: u.l = (bundle[1] >> 23); break;
- }
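-	/*
-	 * For reference: an IA-64 bundle is 128 bits, a 5-bit template
-	 * followed by three 41-bit instruction slots.  Reading the bundle as
-	 * two little-endian 64-bit words gives the shifts above:
-	 *
-	 *	slot 0 = bundle[0] bits [45:5]
-	 *	slot 1 = bundle[0] bits [63:46] (low 18 bits) followed by
-	 *		 bundle[1] bits [22:0] (upper 23 bits), hence >>46 | <<18
-	 *	slot 2 = bundle[1] bits [63:23]
-	 */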
- opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
-
- DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
- "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
- u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
-
- /*
- * IMPORTANT:
-	 * Notice that the switch statement does NOT cover all possible instructions
-	 * that DO generate unaligned references. This is on purpose, because for some
-	 * instructions it DOES NOT make sense to try and emulate the access. Sometimes
-	 * it is WRONG to try and emulate. Here is a list of instructions we don't
-	 * emulate, i.e., the program will get a signal and die:
- *
- * load/store:
- * - ldX.spill
- * - stX.spill
- * Reason: RNATs are based on addresses
- * - ld16
- * - st16
- * Reason: ld16 and st16 are supposed to occur in a single
- * memory op
- *
- * synchronization:
- * - cmpxchg
- * - fetchadd
- * - xchg
- * Reason: ATOMIC operations cannot be emulated properly using multiple
- * instructions.
- *
- * speculative loads:
- * - ldX.sZ
-	 * Reason: side effects; the code must be ready to deal with failure, so
-	 * it is simpler to let the load fail.
- * ---------------------------------------------------------------------------------
- * XXX fixme
- *
- * I would like to get rid of this switch case and do something
- * more elegant.
- */
- switch (opcode) {
- case LDS_OP:
- case LDSA_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- fallthrough;
- case LDS_IMM_OP:
- case LDSA_IMM_OP:
- case LDFS_OP:
- case LDFSA_OP:
- case LDFS_IMM_OP:
- /*
- * The instruction will be retried with deferred exceptions turned on, and
-		 * we should get the NaT bit installed
- *
- * IMPORTANT: When PSR_ED is set, the register & immediate update forms
- * are actually executed even though the operation failed. So we don't
- * need to take care of this.
- */
- DPRINT("forcing PSR_ED\n");
- regs->cr_ipsr |= IA64_PSR_ED;
- goto done;
-
- case LD_OP:
- case LDA_OP:
- case LDBIAS_OP:
- case LDACQ_OP:
- case LDCCLR_OP:
- case LDCNC_OP:
- case LDCCLRACQ_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- fallthrough;
- case LD_IMM_OP:
- case LDA_IMM_OP:
- case LDBIAS_IMM_OP:
- case LDACQ_IMM_OP:
- case LDCCLR_IMM_OP:
- case LDCNC_IMM_OP:
- case LDCCLRACQ_IMM_OP:
- ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
- break;
-
- case ST_OP:
- case STREL_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- fallthrough;
- case ST_IMM_OP:
- case STREL_IMM_OP:
- ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
- break;
-
- case LDF_OP:
- case LDFA_OP:
- case LDFCCLR_OP:
- case LDFCNC_OP:
- if (u.insn.x)
- ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
- else
- ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
- break;
-
- case LDF_IMM_OP:
- case LDFA_IMM_OP:
- case LDFCCLR_IMM_OP:
- case LDFCNC_IMM_OP:
- ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
- break;
-
- case STF_OP:
- case STF_IMM_OP:
- ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
- break;
-
- default:
- goto failure;
- }
- DPRINT("ret=%d\n", ret);
- if (ret)
- goto failure;
-
- if (ipsr->ri == 2)
- /*
- * given today's architecture this case is not likely to happen because a
- * memory access instruction (M) can never be in the last slot of a
- * bundle. But let's keep it for now.
- */
- regs->cr_iip += 16;
- ipsr->ri = (ipsr->ri + 1) & 0x3;
-
- DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
- done:
- return;
-
- failure:
- /* something went wrong... */
- if (!user_mode(regs)) {
- if (eh) {
- ia64_handle_exception(regs, eh);
- goto done;
- }
- if (die_if_kernel("error during unaligned kernel access\n", regs, ret))
- return;
- /* NOT_REACHED */
- }
- force_sigbus:
- force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) ifa,
- 0, 0, 0);
- goto done;
-}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
deleted file mode 100644
index a0fec82c56..0000000000
--- a/arch/ia64/kernel/uncached.c
+++ /dev/null
@@ -1,273 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
- *
- * A simple uncached page allocator using the generic allocator. This
- * allocator first utilizes the spare (spill) pages found in the EFI
- * memmap and will then start converting cached pages to uncached ones
- * at a granule at a time. Node awareness is implemented by having a
- * pool of pages per node.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/efi.h>
-#include <linux/nmi.h>
-#include <linux/genalloc.h>
-#include <linux/gfp.h>
-#include <linux/pgtable.h>
-#include <asm/efi.h>
-#include <asm/page.h>
-#include <asm/pal.h>
-#include <linux/atomic.h>
-#include <asm/tlbflush.h>
-
-struct uncached_pool {
- struct gen_pool *pool;
- struct mutex add_chunk_mutex; /* serialize adding a converted chunk */
- int nchunks_added; /* #of converted chunks added to pool */
- atomic_t status; /* smp called function's return status*/
-};
-
-#define MAX_CONVERTED_CHUNKS_PER_NODE 2
-
-struct uncached_pool uncached_pools[MAX_NUMNODES];
-
-
-static void uncached_ipi_visibility(void *data)
-{
- int status;
- struct uncached_pool *uc_pool = (struct uncached_pool *)data;
-
- status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
- if ((status != PAL_VISIBILITY_OK) &&
- (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
- atomic_inc(&uc_pool->status);
-}
-
-
-static void uncached_ipi_mc_drain(void *data)
-{
- int status;
- struct uncached_pool *uc_pool = (struct uncached_pool *)data;
-
- status = ia64_pal_mc_drain();
- if (status != PAL_STATUS_SUCCESS)
- atomic_inc(&uc_pool->status);
-}
-
-
-/*
- * Add a new chunk of uncached memory pages to the specified pool.
- *
- * @uc_pool: pool to add the new chunk of uncached memory to
- * @nid: node id of node to allocate memory from, or -1
- *
- * This is accomplished by first allocating a granule of cached memory pages
- * and then converting them to uncached memory pages.
- */
-static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
-{
- struct page *page;
- int status, i, nchunks_added = uc_pool->nchunks_added;
- unsigned long c_addr, uc_addr;
-
- if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
- return -1; /* interrupted by a signal */
-
- if (uc_pool->nchunks_added > nchunks_added) {
- /* someone added a new chunk while we were waiting */
- mutex_unlock(&uc_pool->add_chunk_mutex);
- return 0;
- }
-
- if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
- mutex_unlock(&uc_pool->add_chunk_mutex);
- return -1;
- }
-
- /* attempt to allocate a granule's worth of cached memory pages */
-
- page = __alloc_pages_node(nid,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- IA64_GRANULE_SHIFT-PAGE_SHIFT);
- if (!page) {
- mutex_unlock(&uc_pool->add_chunk_mutex);
- return -1;
- }
-
- /* convert the memory pages from cached to uncached */
-
- c_addr = (unsigned long)page_address(page);
- uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
-
- /*
- * There's a small race here where it's possible for someone to
- * access the page through /dev/mem halfway through the conversion
- * to uncached - not sure it's really worth bothering about
- */
- for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
- SetPageUncached(&page[i]);
-
- flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
-
- status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
- if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
- atomic_set(&uc_pool->status, 0);
- smp_call_function(uncached_ipi_visibility, uc_pool, 1);
- if (atomic_read(&uc_pool->status))
- goto failed;
- } else if (status != PAL_VISIBILITY_OK)
- goto failed;
-
- preempt_disable();
-
- flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
-
- /* flush the just introduced uncached translation from the TLB */
- local_flush_tlb_all();
-
- preempt_enable();
-
- status = ia64_pal_mc_drain();
- if (status != PAL_STATUS_SUCCESS)
- goto failed;
- atomic_set(&uc_pool->status, 0);
- smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
- if (atomic_read(&uc_pool->status))
- goto failed;
-
- /*
- * The chunk of memory pages has been converted to uncached so now we
- * can add it to the pool.
- */
- status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
- if (status)
- goto failed;
-
- uc_pool->nchunks_added++;
- mutex_unlock(&uc_pool->add_chunk_mutex);
- return 0;
-
- /* failed to convert or add the chunk so give it back to the kernel */
-failed:
- for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
- ClearPageUncached(&page[i]);
-
- free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
- mutex_unlock(&uc_pool->add_chunk_mutex);
- return -1;
-}
-
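-/*
- * Recap of the conversion protocol implemented above: allocate a cached
- * granule, mark every page uncached, flush the kernel TLB range, make
- * prefetches visible via PAL (IPI'ing all other cpus if PAL asks for
- * it), flush the icache and the local TLB, drain outstanding memory
- * operations via PAL on all cpus, and only then hand the granule to the
- * gen_pool.  Any failure unwinds by clearing the page flags and freeing
- * the granule back to the kernel.
- */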
-
-/*
- * uncached_alloc_page
- *
- * @starting_nid: node id of node to start with, or -1
- * @n_pages: number of contiguous pages to allocate
- *
- * Allocate the specified number of contiguous uncached pages on the
- * requested node. If not enough contiguous uncached pages are available
- * on the requested node, round-robin starting with the next higher node.
- */
-unsigned long uncached_alloc_page(int starting_nid, int n_pages)
-{
- unsigned long uc_addr;
- struct uncached_pool *uc_pool;
- int nid;
-
- if (unlikely(starting_nid >= MAX_NUMNODES))
- return 0;
-
- if (starting_nid < 0)
- starting_nid = numa_node_id();
- nid = starting_nid;
-
- do {
- if (!node_state(nid, N_HIGH_MEMORY))
- continue;
- uc_pool = &uncached_pools[nid];
- if (uc_pool->pool == NULL)
- continue;
- do {
- uc_addr = gen_pool_alloc(uc_pool->pool,
- n_pages * PAGE_SIZE);
- if (uc_addr != 0)
- return uc_addr;
- } while (uncached_add_chunk(uc_pool, nid) == 0);
-
- } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
-
- return 0;
-}
-EXPORT_SYMBOL(uncached_alloc_page);
-
-
-/*
- * uncached_free_page
- *
- * @uc_addr: uncached address of first page to free
- * @n_pages: number of contiguous pages to free
- *
- * Free the specified number of uncached pages.
- */
-void uncached_free_page(unsigned long uc_addr, int n_pages)
-{
- int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pools[nid].pool;
-
- if (unlikely(pool == NULL))
- return;
-
-	if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
- panic("uncached_free_page invalid address %lx\n", uc_addr);
-
- gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
-}
-EXPORT_SYMBOL(uncached_free_page);
-
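-/*
- * A typical use of the pair above might look like (illustrative sketch):
- *
- *	unsigned long uc = uncached_alloc_page(-1, 1);	// any node, 1 page
- *
- *	if (uc) {
- *		... use the uncached mapping at uc ...
- *		uncached_free_page(uc, 1);
- *	}
- */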
-
-/*
- * uncached_build_memmap
- *
- * @uc_start: uncached starting address of a chunk of uncached memory
- * @uc_end: uncached ending address of a chunk of uncached memory
- * @arg: ignored (NULL is passed in by the call to efi_memmap_walk_uc())
- *
- * Called at boot time to build a map of pages that can be used for
- * memory special operations.
- */
-static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
-{
- int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pools[nid].pool;
- size_t size = uc_end - uc_start;
-
- touch_softlockup_watchdog();
-
- if (pool != NULL) {
- memset((char *)uc_start, 0, size);
- (void) gen_pool_add(pool, uc_start, size, nid);
- }
- return 0;
-}
-
-
-static int __init uncached_init(void)
-{
- int nid;
-
- for_each_online_node(nid) {
- uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
- mutex_init(&uncached_pools[nid].add_chunk_mutex);
- }
-
- efi_memmap_walk_uc(uncached_build_memmap, NULL);
- return 0;
-}
-
-__initcall(uncached_init);
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
deleted file mode 100644
index 6bd64c35e6..0000000000
--- a/arch/ia64/kernel/unwind.c
+++ /dev/null
@@ -1,2320 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 1999-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
- * - Change pt_regs_off() to make it less dependent on pt_regs structure.
- */
-/*
- * This file implements call frame unwind support for the Linux
- * kernel. Parsing and processing the unwind information is
- * time-consuming, so this implementation translates the unwind
- * descriptors into unwind scripts. These scripts are very simple
- * (basically a sequence of assignments) and efficient to execute.
- * They are cached for later re-use. Each script is specific for a
- * given instruction pointer address and the set of predicate values
- * that the script depends on (most unwind descriptors are
- * unconditional and scripts often do not depend on predicates at
- * all). This code is based on the unwind conventions described in
- * the "IA-64 Software Conventions and Runtime Architecture" manual.
- *
- * SMP conventions:
- * o updates to the global unwind data (in structure "unw") are serialized
- * by the unw.lock spinlock
- * o each unwind script has its own read-write lock; a thread must acquire
- * a read lock before executing a script and must acquire a write lock
- * before modifying a script
- * o if both the unw.lock spinlock and a script's read-write lock must be
- * acquired, then the read-write lock must be acquired first.
- */
-#include <linux/module.h>
-#include <linux/memblock.h>
-#include <linux/elf.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <asm/unwind.h>
-
-#include <asm/delay.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/ptrace_offsets.h>
-#include <asm/rse.h>
-#include <asm/sections.h>
-#include <linux/uaccess.h>
-
-#include "entry.h"
-#include "unwind_i.h"
-
-#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
-#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
-
-#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
-#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
-
-#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
-
-#ifdef UNW_DEBUG
- static unsigned int unw_debug_level = UNW_DEBUG;
-# define UNW_DEBUG_ON(n) unw_debug_level >= n
- /* Do not code a printk level, not all debug lines end in newline */
-# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
-# undef inline
-# define inline
-#else /* !UNW_DEBUG */
-# define UNW_DEBUG_ON(n) 0
-# define UNW_DPRINT(n, ...)
-#endif /* UNW_DEBUG */
-
-#if UNW_STATS
-# define STAT(x...) x
-#else
-# define STAT(x...)
-#endif
-
-#define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
-#define free_reg_state(usr) kfree(usr)
-#define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
-#define free_labeled_state(usr) kfree(usr)
-
-typedef unsigned long unw_word;
-typedef unsigned char unw_hash_index_t;
-
-static struct {
- spinlock_t lock; /* spinlock for unwind data */
-
- /* list of unwind tables (one per load-module) */
- struct unw_table *tables;
-
- unsigned long r0; /* constant 0 for r0 */
-
- /* table of registers that prologues can save (and order in which they're saved): */
- const unsigned char save_order[8];
-
- /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
- unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
-
-	unsigned short lru_head;	/* index of least-recently used script */
- unsigned short lru_tail; /* index of most-recently used script */
-
- /* index into unw_frame_info for preserved register i */
- unsigned short preg_index[UNW_NUM_REGS];
-
- short pt_regs_offsets[32];
-
- /* unwind table for the kernel: */
- struct unw_table kernel_table;
-
- /* unwind table describing the gate page (kernel code that is mapped into user space): */
- size_t gate_table_size;
- unsigned long *gate_table;
-
- /* hash table that maps instruction pointer to script index: */
- unsigned short hash[UNW_HASH_SIZE];
-
- /* script cache: */
- struct unw_script cache[UNW_CACHE_SIZE];
-
-# ifdef UNW_DEBUG
- const char *preg_name[UNW_NUM_REGS];
-# endif
-# if UNW_STATS
- struct {
- struct {
- int lookups;
- int hinted_hits;
- int normal_hits;
- int collision_chain_traversals;
- } cache;
- struct {
- unsigned long build_time;
- unsigned long run_time;
- unsigned long parse_time;
- int builds;
- int news;
- int collisions;
- int runs;
- } script;
- struct {
- unsigned long init_time;
- unsigned long unwind_time;
- int inits;
- int unwinds;
- } api;
- } stat;
-# endif
-} unw = {
- .tables = &unw.kernel_table,
- .lock = __SPIN_LOCK_UNLOCKED(unw.lock),
- .save_order = {
- UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
- UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
- },
- .preg_index = {
- offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
- offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
- offsetof(struct unw_frame_info, bsp_loc)/8,
- offsetof(struct unw_frame_info, bspstore_loc)/8,
- offsetof(struct unw_frame_info, pfs_loc)/8,
- offsetof(struct unw_frame_info, rnat_loc)/8,
- offsetof(struct unw_frame_info, psp)/8,
- offsetof(struct unw_frame_info, rp_loc)/8,
- offsetof(struct unw_frame_info, r4)/8,
- offsetof(struct unw_frame_info, r5)/8,
- offsetof(struct unw_frame_info, r6)/8,
- offsetof(struct unw_frame_info, r7)/8,
- offsetof(struct unw_frame_info, unat_loc)/8,
- offsetof(struct unw_frame_info, pr_loc)/8,
- offsetof(struct unw_frame_info, lc_loc)/8,
- offsetof(struct unw_frame_info, fpsr_loc)/8,
- offsetof(struct unw_frame_info, b1_loc)/8,
- offsetof(struct unw_frame_info, b2_loc)/8,
- offsetof(struct unw_frame_info, b3_loc)/8,
- offsetof(struct unw_frame_info, b4_loc)/8,
- offsetof(struct unw_frame_info, b5_loc)/8,
- offsetof(struct unw_frame_info, f2_loc)/8,
- offsetof(struct unw_frame_info, f3_loc)/8,
- offsetof(struct unw_frame_info, f4_loc)/8,
- offsetof(struct unw_frame_info, f5_loc)/8,
- offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
- },
- .pt_regs_offsets = {
- [0] = -1,
- offsetof(struct pt_regs, r1),
- offsetof(struct pt_regs, r2),
- offsetof(struct pt_regs, r3),
- [4] = -1, [5] = -1, [6] = -1, [7] = -1,
- offsetof(struct pt_regs, r8),
- offsetof(struct pt_regs, r9),
- offsetof(struct pt_regs, r10),
- offsetof(struct pt_regs, r11),
- offsetof(struct pt_regs, r12),
- offsetof(struct pt_regs, r13),
- offsetof(struct pt_regs, r14),
- offsetof(struct pt_regs, r15),
- offsetof(struct pt_regs, r16),
- offsetof(struct pt_regs, r17),
- offsetof(struct pt_regs, r18),
- offsetof(struct pt_regs, r19),
- offsetof(struct pt_regs, r20),
- offsetof(struct pt_regs, r21),
- offsetof(struct pt_regs, r22),
- offsetof(struct pt_regs, r23),
- offsetof(struct pt_regs, r24),
- offsetof(struct pt_regs, r25),
- offsetof(struct pt_regs, r26),
- offsetof(struct pt_regs, r27),
- offsetof(struct pt_regs, r28),
- offsetof(struct pt_regs, r29),
- offsetof(struct pt_regs, r30),
- offsetof(struct pt_regs, r31),
- },
- .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
-#ifdef UNW_DEBUG
- .preg_name = {
- "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
- "r4", "r5", "r6", "r7",
- "ar.unat", "pr", "ar.lc", "ar.fpsr",
- "b1", "b2", "b3", "b4", "b5",
- "f2", "f3", "f4", "f5",
- "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
- "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
- }
-#endif
-};
-
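-/*
- * Note: the preg_index[] values above are offsets into struct
- * unw_frame_info counted in 8-byte words (hence the "/8"); compile_reg()
- * and emit_nat_info() below use them as destination indices when
- * emitting script instructions.
- */
-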
-static inline int
-read_only (void *addr)
-{
- return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
-}
-
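-/*
- * unw.r0 is the one word that backs the architectural constant r0 (see
- * the struct comment above): compile_reg() emits UNW_INSN_MOVE_CONST so
- * that a register's save location points at it when the value is known
- * to be zero, and read_only() is how the write accessors below detect
- * and refuse that case.
- */
-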
-/*
- * Returns offset of rREG in struct pt_regs.
- */
-static inline unsigned long
-pt_regs_off (unsigned long reg)
-{
- short off = -1;
-
- if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
- off = unw.pt_regs_offsets[reg];
-
- if (off < 0) {
- UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
- off = 0;
- }
- return (unsigned long) off;
-}
-
-static inline struct pt_regs *
-get_scratch_regs (struct unw_frame_info *info)
-{
- if (!info->pt) {
- /* This should not happen with valid unwind info. */
- UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
- if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
- info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
- else
- info->pt = info->sp - 16;
- }
- UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
- return (struct pt_regs *) info->pt;
-}
-
-/* Unwind accessors. */
-
-int
-unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
-{
- unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
- struct unw_ireg *ireg;
- struct pt_regs *pt;
-
- if ((unsigned) regnum - 1 >= 127) {
- if (regnum == 0 && !write) {
- *val = 0; /* read r0 always returns 0 */
- *nat = 0;
- return 0;
- }
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
- __func__, regnum);
- return -1;
- }
-
- if (regnum < 32) {
- if (regnum >= 4 && regnum <= 7) {
- /* access a preserved register */
- ireg = &info->r4 + (regnum - 4);
- addr = ireg->loc;
- if (addr) {
- nat_addr = addr + ireg->nat.off;
- switch (ireg->nat.type) {
- case UNW_NAT_VAL:
- /* simulate getf.sig/setf.sig */
- if (write) {
- if (*nat) {
- /* write NaTVal and be done with it */
- addr[0] = 0;
- addr[1] = 0x1fffe;
- return 0;
- }
- addr[1] = 0x1003e;
- } else {
-						if (addr[0] == 0 && addr[1] == 0x1fffe) {
- /* return NaT and be done with it */
- *val = 0;
- *nat = 1;
- return 0;
- }
- }
- fallthrough;
- case UNW_NAT_NONE:
- dummy_nat = 0;
- nat_addr = &dummy_nat;
- break;
-
- case UNW_NAT_MEMSTK:
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- break;
-
- case UNW_NAT_REGSTK:
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- {
- UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
- "[0x%lx-0x%lx)\n",
- __func__, (void *) addr,
- info->regstk.limit,
- info->regstk.top);
- return -1;
- }
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- break;
- }
- } else {
- addr = &info->sw->r4 + (regnum - 4);
- nat_addr = &info->sw->ar_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a scratch register */
- pt = get_scratch_regs(info);
- addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
- if (info->pri_unat_loc)
- nat_addr = info->pri_unat_loc;
- else
- nat_addr = &info->sw->caller_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a stacked register */
- addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
- "of rbs\n", __func__);
- return -1;
- }
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- }
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __func__);
- } else {
- *addr = *val;
- if (*nat)
- *nat_addr |= nat_mask;
- else
- *nat_addr &= ~nat_mask;
- }
- } else {
- if ((*nat_addr & nat_mask) == 0) {
- *val = *addr;
- *nat = 0;
- } else {
- *val = 0; /* if register is a NaT, *addr may contain kernel data! */
- *nat = 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(unw_access_gr);
-
-int
-unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
-
- switch (regnum) {
- /* scratch: */
- case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
- case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
- case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
-
- /* preserved: */
- case 1: case 2: case 3: case 4: case 5:
- addr = *(&info->b1_loc + (regnum - 1));
- if (!addr)
- addr = &info->sw->b1 + (regnum - 1);
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
- __func__, regnum);
- return -1;
- }
- if (write)
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __func__);
- } else
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_br);
-
-int
-unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
-{
- struct ia64_fpreg *addr = NULL;
- struct pt_regs *pt;
-
- if ((unsigned) (regnum - 2) >= 126) {
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
- __func__, regnum);
- return -1;
- }
-
- if (regnum <= 5) {
- addr = *(&info->f2_loc + (regnum - 2));
- if (!addr)
- addr = &info->sw->f2 + (regnum - 2);
- } else if (regnum <= 15) {
- if (regnum <= 11) {
- pt = get_scratch_regs(info);
- addr = &pt->f6 + (regnum - 6);
- }
- else
- addr = &info->sw->f12 + (regnum - 12);
- } else if (regnum <= 31) {
- addr = info->fr_loc[regnum - 16];
- if (!addr)
- addr = &info->sw->f16 + (regnum - 16);
- } else {
- struct task_struct *t = info->task;
-
- if (write)
- ia64_sync_fph(t);
- else
- ia64_flush_fph(t);
- addr = t->thread.fph + (regnum - 32);
- }
-
- if (write)
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __func__);
- } else
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_fr);
-
-int
-unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
-
- switch (regnum) {
- case UNW_AR_BSP:
- addr = info->bsp_loc;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
-
- case UNW_AR_BSPSTORE:
- addr = info->bspstore_loc;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
-
- case UNW_AR_PFS:
- addr = info->pfs_loc;
- if (!addr)
- addr = &info->sw->ar_pfs;
- break;
-
- case UNW_AR_RNAT:
- addr = info->rnat_loc;
- if (!addr)
- addr = &info->sw->ar_rnat;
- break;
-
- case UNW_AR_UNAT:
- addr = info->unat_loc;
- if (!addr)
- addr = &info->sw->caller_unat;
- break;
-
- case UNW_AR_LC:
- addr = info->lc_loc;
- if (!addr)
- addr = &info->sw->ar_lc;
- break;
-
- case UNW_AR_EC:
- if (!info->cfm_loc)
- return -1;
- if (write)
- *info->cfm_loc =
- (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
- else
- *val = (*info->cfm_loc >> 52) & 0x3f;
- return 0;
-
- case UNW_AR_FPSR:
- addr = info->fpsr_loc;
- if (!addr)
- addr = &info->sw->ar_fpsr;
- break;
-
- case UNW_AR_RSC:
- pt = get_scratch_regs(info);
- addr = &pt->ar_rsc;
- break;
-
- case UNW_AR_CCV:
- pt = get_scratch_regs(info);
- addr = &pt->ar_ccv;
- break;
-
- case UNW_AR_CSD:
- pt = get_scratch_regs(info);
- addr = &pt->ar_csd;
- break;
-
- case UNW_AR_SSD:
- pt = get_scratch_regs(info);
- addr = &pt->ar_ssd;
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
- __func__, regnum);
- return -1;
- }
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __func__);
- } else
- *addr = *val;
- } else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_ar);
-
-int
-unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
-{
- unsigned long *addr;
-
- addr = info->pr_loc;
- if (!addr)
- addr = &info->sw->pr;
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __func__);
- } else
- *addr = *val;
- } else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_pr);
-
-
-/* Routines to manipulate the state stack. */
-
-static inline void
-push (struct unw_state_record *sr)
-{
- struct unw_reg_state *rs;
-
- rs = alloc_reg_state();
- if (!rs) {
- printk(KERN_ERR "unwind: cannot stack reg state!\n");
- return;
- }
- memcpy(rs, &sr->curr, sizeof(*rs));
- sr->curr.next = rs;
-}
-
-static void
-pop (struct unw_state_record *sr)
-{
- struct unw_reg_state *rs = sr->curr.next;
-
- if (!rs) {
- printk(KERN_ERR "unwind: stack underflow!\n");
- return;
- }
- memcpy(&sr->curr, rs, sizeof(*rs));
- free_reg_state(rs);
-}
-
-/* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
-static struct unw_reg_state *
-dup_state_stack (struct unw_reg_state *rs)
-{
- struct unw_reg_state *copy, *prev = NULL, *first = NULL;
-
- while (rs) {
- copy = alloc_reg_state();
- if (!copy) {
- printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
- return NULL;
- }
- memcpy(copy, rs, sizeof(*copy));
- if (first)
- prev->next = copy;
- else
- first = copy;
- rs = rs->next;
- prev = copy;
- }
- return first;
-}
-
-/* Free all stacked register states (but not RS itself). */
-static void
-free_state_stack (struct unw_reg_state *rs)
-{
- struct unw_reg_state *p, *next;
-
- for (p = rs->next; p != NULL; p = next) {
- next = p->next;
- free_reg_state(p);
- }
- rs->next = NULL;
-}
-
-/* Unwind decoder routines */
-
-static enum unw_register_index __attribute_const__
-decode_abreg (unsigned char abreg, int memory)
-{
- switch (abreg) {
- case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
- case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
- case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
- case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
- case 0x60: return UNW_REG_PR;
- case 0x61: return UNW_REG_PSP;
- case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
- case 0x63: return UNW_REG_RP;
- case 0x64: return UNW_REG_BSP;
- case 0x65: return UNW_REG_BSPSTORE;
- case 0x66: return UNW_REG_RNAT;
- case 0x67: return UNW_REG_UNAT;
- case 0x68: return UNW_REG_FPSR;
- case 0x69: return UNW_REG_PFS;
- case 0x6a: return UNW_REG_LC;
- default:
- break;
- }
- UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
- return UNW_REG_LC;
-}
-
-static void
-set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
-{
- reg->val = val;
- reg->where = where;
- if (reg->when == UNW_WHEN_NEVER)
- reg->when = when;
-}
-
-static void
-alloc_spill_area (unsigned long *offp, unsigned long regsize,
- struct unw_reg_info *lo, struct unw_reg_info *hi)
-{
- struct unw_reg_info *reg;
-
- for (reg = hi; reg >= lo; --reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->where = UNW_WHERE_PSPREL;
- *offp -= regsize;
- reg->val = *offp;
- }
- }
-}
-
-static inline void
-spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
-{
- struct unw_reg_info *reg;
-
- for (reg = *regp; reg <= lim; ++reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->when = t;
- *regp = reg + 1;
- return;
- }
- }
- UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__);
-}
-
-static inline void
-finish_prologue (struct unw_state_record *sr)
-{
- struct unw_reg_info *reg;
- unsigned long off;
- int i;
-
- /*
- * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
- * for Using Unwind Descriptors", rule 3):
- */
- for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
- reg = sr->curr.reg + unw.save_order[i];
- if (reg->where == UNW_WHERE_GR_SAVE) {
- reg->where = UNW_WHERE_GR;
- reg->val = sr->gr_save_loc++;
- }
- }
-
- /*
- * Next, compute when the fp, general, and branch registers get
- * saved. This must come before alloc_spill_area() because
- * we need to know which registers are spilled to their home
- * locations.
- */
- if (sr->imask) {
- unsigned char kind, mask = 0, *cp = sr->imask;
- int t;
- static const unsigned char limit[3] = {
- UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
- };
- struct unw_reg_info *(regs[3]);
-
- regs[0] = sr->curr.reg + UNW_REG_F2;
- regs[1] = sr->curr.reg + UNW_REG_R4;
- regs[2] = sr->curr.reg + UNW_REG_B1;
-
- for (t = 0; t < sr->region_len; ++t) {
- if ((t & 3) == 0)
- mask = *cp++;
- kind = (mask >> 2*(3-(t & 3))) & 3;
- if (kind > 0)
- spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
- sr->region_start + t);
- }
- }
- /*
- * Next, lay out the memory stack spill area:
- */
- if (sr->any_spills) {
- off = sr->spill_offset;
- alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
- }
-}
-
-/*
- * Region header descriptors.
- */
-
-static void
-desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
- struct unw_state_record *sr)
-{
- int i, region_start;
-
- if (!(sr->in_body || sr->first_region))
- finish_prologue(sr);
- sr->first_region = 0;
-
- /* check if we're done: */
- if (sr->when_target < sr->region_start + sr->region_len) {
- sr->done = 1;
- return;
- }
-
- region_start = sr->region_start + sr->region_len;
-
- for (i = 0; i < sr->epilogue_count; ++i)
- pop(sr);
- sr->epilogue_count = 0;
- sr->epilogue_start = UNW_WHEN_NEVER;
-
- sr->region_start = region_start;
- sr->region_len = rlen;
- sr->in_body = body;
-
- if (!body) {
- push(sr);
-
- for (i = 0; i < 4; ++i) {
- if (mask & 0x8)
- set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, grsave++);
- mask <<= 1;
- }
- sr->gr_save_loc = grsave;
- sr->any_spills = 0;
- sr->imask = NULL;
- sr->spill_offset = 0x10; /* default to psp+16 */
- }
-}
-
-/*
- * Prologue descriptors.
- */
-
-static inline void
-desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
-{
- if (abi == 3 && context == 'i') {
- sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
- UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__);
- }
- else
- UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
- __func__, abi, context);
-}
-
-static inline void
-desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 5; ++i) {
- if (brmask & 1)
- set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, gr++);
- brmask >>= 1;
- }
-}
-
-static inline void
-desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 5; ++i) {
- if (brmask & 1) {
- set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- brmask >>= 1;
- }
-}
-
-static inline void
-desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- grmask >>= 1;
- }
- for (i = 0; i < 20; ++i) {
- if ((frmask & 1) != 0) {
- int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
- set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- frmask >>= 1;
- }
-}
-
-static inline void
-desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((frmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- frmask >>= 1;
- }
-}
-
-static inline void
-desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0)
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, gr++);
- grmask >>= 1;
- }
-}
-
-static inline void
-desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- grmask >>= 1;
- }
-}
-
-static inline void
-desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
- sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
-}
-
-static inline void
-desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
-{
- sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
-}
-
-static inline void
-desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
-}
-
-static inline void
-desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
- 0x10 - 4*pspoff);
-}
-
-static inline void
-desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
- 4*spoff);
-}
-
-static inline void
-desc_rp_br (unsigned char dst, struct unw_state_record *sr)
-{
- sr->return_link_reg = dst;
-}
-
-static inline void
-desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
-{
- struct unw_reg_info *reg = sr->curr.reg + regnum;
-
- if (reg->where == UNW_WHERE_NONE)
- reg->where = UNW_WHERE_GR_SAVE;
- reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
-}
-
-static inline void
-desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
-{
- sr->spill_offset = 0x10 - 4*pspoff;
-}
-
-static inline unsigned char *
-desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
-{
- sr->imask = imaskp;
- return imaskp + (2*sr->region_len + 7)/8;
-}
-
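-/*
- * For reference, the imask consumed above packs two bits per instruction
- * in the region, four instructions per byte, most significant pair
- * first.  Each pair selects the register class spilled at that slot:
- *
- *	0 = none, 1 = FR (f2-f5, f16-f31), 2 = GR (r4-r7), 3 = BR (b1-b5)
- *
- * hence the (2*region_len + 7)/8 bytes consumed above and the
- * "(mask >> 2*(3 - (t & 3))) & 3" extraction in finish_prologue().
- */
-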
-/*
- * Body descriptors.
- */
-static inline void
-desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
-{
- sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
- sr->epilogue_count = ecount + 1;
-}
-
-static inline void
-desc_copy_state (unw_word label, struct unw_state_record *sr)
-{
- struct unw_labeled_state *ls;
-
- for (ls = sr->labeled_states; ls; ls = ls->next) {
- if (ls->label == label) {
- free_state_stack(&sr->curr);
- memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
- sr->curr.next = dup_state_stack(ls->saved_state.next);
- return;
- }
- }
- printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
-}
-
-static inline void
-desc_label_state (unw_word label, struct unw_state_record *sr)
-{
- struct unw_labeled_state *ls;
-
- ls = alloc_labeled_state();
- if (!ls) {
- printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
- return;
- }
- ls->label = label;
- memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
- ls->saved_state.next = dup_state_stack(sr->curr.next);
-
- /* insert into list of labeled states: */
- ls->next = sr->labeled_states;
- sr->labeled_states = ls;
-}
-
-/*
- * General descriptors.
- */
-
-static inline int
-desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
-{
- if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
- return 0;
- if (qp > 0) {
- if ((sr->pr_val & (1UL << qp)) == 0)
- return 0;
- sr->pr_mask |= (1UL << qp);
- }
- return 1;
-}
-
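-/*
- * A descriptor with qp > 0 only takes effect if predicate qp was set at
- * the unwind target; recording that bit in sr->pr_mask is what later
- * lets cache_match() reuse a script only for the predicate values it
- * actually depends on.
- */
-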
-static inline void
-desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 0);
- r->where = UNW_WHERE_NONE;
- r->when = UNW_WHEN_NEVER;
- r->val = 0;
-}
-
-static inline void
-desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
- unsigned char ytreg, struct unw_state_record *sr)
-{
- enum unw_where where = UNW_WHERE_GR;
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- if (x)
- where = UNW_WHERE_BR;
- else if (ytreg & 0x80)
- where = UNW_WHERE_FR;
-
- r = sr->curr.reg + decode_abreg(abreg, 0);
- r->where = where;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = (ytreg & 0x7f);
-}
-
-static inline void
-desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
- struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 1);
- r->where = UNW_WHERE_PSPREL;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = 0x10 - 4*pspoff;
-}
-
-static inline void
-desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
- struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 1);
- r->where = UNW_WHERE_SPREL;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = 4*spoff;
-}
-
-#define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
- code);
-
-/*
- * region headers:
- */
-#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
-#define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
-/*
- * prologue descriptors:
- */
-#define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
-#define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
-#define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
-#define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
-#define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
-#define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
-#define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
-#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
-#define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
-#define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
-#define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
-#define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
-#define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
-#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
-#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
-#define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
-#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
-#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
-#define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
-#define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
-#define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
-/*
- * body descriptors:
- */
-#define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
-#define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
-#define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
-/*
- * general unwind descriptors:
- */
-#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
-#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
-#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
-#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
-#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
-#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
-#define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
-#define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
-
-#include "unwind_decoder.c"
-
-
-/* Unwind scripts. */
-
-static inline unw_hash_index_t
-hash (unsigned long ip)
-{
- /* magic number = ((sqrt(5)-1)/2)*2^64 */
- static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
-
- return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
-}
-
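-/*
- * This is Fibonacci hashing: the low four bits of ip only encode the
- * slot within the 16-byte bundle (hence the ">> 4"), and multiplying by
- * 2^64/phi scrambles the remaining bits so that the top
- * UNW_LOG_HASH_SIZE bits of the product form a well-distributed index
- * in [0, UNW_HASH_SIZE).
- */
-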
-static inline long
-cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
-{
- read_lock(&script->lock);
- if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
- /* keep the read lock... */
- return 1;
- read_unlock(&script->lock);
- return 0;
-}
-
-static inline struct unw_script *
-script_lookup (struct unw_frame_info *info)
-{
- struct unw_script *script = unw.cache + info->hint;
- unsigned short index;
- unsigned long ip, pr;
-
- if (UNW_DEBUG_ON(0))
- return NULL; /* Always regenerate scripts in debug mode */
-
- STAT(++unw.stat.cache.lookups);
-
- ip = info->ip;
- pr = info->pr;
-
- if (cache_match(script, ip, pr)) {
- STAT(++unw.stat.cache.hinted_hits);
- return script;
- }
-
- index = unw.hash[hash(ip)];
- if (index >= UNW_CACHE_SIZE)
- return NULL;
-
- script = unw.cache + index;
- while (1) {
- if (cache_match(script, ip, pr)) {
- /* update hint; no locking required as single-word writes are atomic */
- STAT(++unw.stat.cache.normal_hits);
- unw.cache[info->prev_script].hint = script - unw.cache;
- return script;
- }
- if (script->coll_chain >= UNW_HASH_SIZE)
- return NULL;
- script = unw.cache + script->coll_chain;
- STAT(++unw.stat.cache.collision_chain_traversals);
- }
-}
-
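-/*
- * Locking summary for the above: on a hit the matching script is
- * returned with its read lock still held (taken inside cache_match())
- * and is dropped by the caller once the script has been run; a NULL
- * return means "not cached" and no lock is held.
- */
-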
-/*
- * On returning, a write lock for the SCRIPT is still being held.
- */
-static inline struct unw_script *
-script_new (unsigned long ip)
-{
- struct unw_script *script, *prev, *tmp;
- unw_hash_index_t index;
- unsigned short head;
-
- STAT(++unw.stat.script.news);
-
- /*
-	 * Can't (easily) use cmpxchg() here because of the ABA problem
-	 * that is intrinsic to cmpxchg()...
- */
- head = unw.lru_head;
- script = unw.cache + head;
- unw.lru_head = script->lru_chain;
-
- /*
- * We'd deadlock here if we interrupted a thread that is holding a read lock on
- * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
- * alternative would be to disable interrupts whenever we hold a read-lock, but
- * that seems silly.
- */
- if (!write_trylock(&script->lock))
- return NULL;
-
- /* re-insert script at the tail of the LRU chain: */
- unw.cache[unw.lru_tail].lru_chain = head;
- unw.lru_tail = head;
-
- /* remove the old script from the hash table (if it's there): */
- if (script->ip) {
- index = hash(script->ip);
- tmp = unw.cache + unw.hash[index];
- prev = NULL;
- while (1) {
- if (tmp == script) {
- if (prev)
- prev->coll_chain = tmp->coll_chain;
- else
- unw.hash[index] = tmp->coll_chain;
- break;
- } else
- prev = tmp;
- if (tmp->coll_chain >= UNW_CACHE_SIZE)
- /* old script wasn't in the hash-table */
- break;
- tmp = unw.cache + tmp->coll_chain;
- }
- }
-
- /* enter new script in the hash table */
- index = hash(ip);
- script->coll_chain = unw.hash[index];
- unw.hash[index] = script - unw.cache;
-
- script->ip = ip; /* set new IP while we're holding the locks */
-
- STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
-
- script->flags = 0;
- script->hint = 0;
- script->count = 0;
- return script;
-}
-
-static void
-script_finalize (struct unw_script *script, struct unw_state_record *sr)
-{
- script->pr_mask = sr->pr_mask;
- script->pr_val = sr->pr_val;
- /*
- * We could down-grade our write-lock on script->lock here but
- * the rwlock API doesn't offer atomic lock downgrading, so
- * we'll just keep the write-lock and release it later when
- * we're done using the script.
- */
-}
-
-static inline void
-script_emit (struct unw_script *script, struct unw_insn insn)
-{
- if (script->count >= UNW_MAX_SCRIPT_LEN) {
- UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
- __func__, UNW_MAX_SCRIPT_LEN);
- return;
- }
- script->insn[script->count++] = insn;
-}
-
-static inline void
-emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
-{
- struct unw_reg_info *r = sr->curr.reg + i;
- enum unw_insn_opcode opc;
- struct unw_insn insn;
- unsigned long val = 0;
-
- switch (r->where) {
- case UNW_WHERE_GR:
- if (r->val >= 32) {
- /* register got spilled to a stacked register */
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_REGSTK;
- } else
- /* register got spilled to a scratch register */
- opc = UNW_INSN_SETNAT_MEMSTK;
- break;
-
- case UNW_WHERE_FR:
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_VAL;
- break;
-
- case UNW_WHERE_BR:
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_NONE;
- break;
-
- case UNW_WHERE_PSPREL:
- case UNW_WHERE_SPREL:
- opc = UNW_INSN_SETNAT_MEMSTK;
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
- __func__, r->where);
- return;
- }
- insn.opc = opc;
- insn.dst = unw.preg_index[i];
- insn.val = val;
- script_emit(script, insn);
-}
-
-static void
-compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
-{
- struct unw_reg_info *r = sr->curr.reg + i;
- enum unw_insn_opcode opc;
- unsigned long val, rval;
- struct unw_insn insn;
- long need_nat_info;
-
- if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
- return;
-
- opc = UNW_INSN_MOVE;
- val = rval = r->val;
- need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
-
- switch (r->where) {
- case UNW_WHERE_GR:
- if (rval >= 32) {
- opc = UNW_INSN_MOVE_STACKED;
- val = rval - 32;
- } else if (rval >= 4 && rval <= 7) {
- if (need_nat_info) {
- opc = UNW_INSN_MOVE2;
- need_nat_info = 0;
- }
- val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
- } else if (rval == 0) {
- opc = UNW_INSN_MOVE_CONST;
- val = 0;
- } else {
- /* register got spilled to a scratch register */
- opc = UNW_INSN_MOVE_SCRATCH;
- val = pt_regs_off(rval);
- }
- break;
-
- case UNW_WHERE_FR:
- if (rval <= 5)
- val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
- else if (rval >= 16 && rval <= 31)
- val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
- else {
- opc = UNW_INSN_MOVE_SCRATCH;
- if (rval <= 11)
- val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
- else
- UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
- __func__, rval);
- }
- break;
-
- case UNW_WHERE_BR:
- if (rval >= 1 && rval <= 5)
- val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
- else {
- opc = UNW_INSN_MOVE_SCRATCH;
- if (rval == 0)
- val = offsetof(struct pt_regs, b0);
- else if (rval == 6)
- val = offsetof(struct pt_regs, b6);
- else
- val = offsetof(struct pt_regs, b7);
- }
- break;
-
- case UNW_WHERE_SPREL:
- opc = UNW_INSN_ADD_SP;
- break;
-
- case UNW_WHERE_PSPREL:
- opc = UNW_INSN_ADD_PSP;
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
- __func__, i, r->where);
- break;
- }
- insn.opc = opc;
- insn.dst = unw.preg_index[i];
- insn.val = val;
- script_emit(script, insn);
- if (need_nat_info)
- emit_nat_info(sr, i, script);
-
- if (i == UNW_REG_PSP) {
- /*
- * info->psp must contain the _value_ of the previous
- * sp, not its save location. We get this by
- * dereferencing the value we just stored in
- * info->psp:
- */
- insn.opc = UNW_INSN_LOAD;
- insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
- script_emit(script, insn);
- }
-}
-
-static inline const struct unw_table_entry *
-lookup (struct unw_table *table, unsigned long rel_ip)
-{
- const struct unw_table_entry *e = NULL;
- unsigned long lo, hi, mid;
-
- /* do a binary search for right entry: */
- for (lo = 0, hi = table->length; lo < hi; ) {
- mid = (lo + hi) / 2;
- e = &table->array[mid];
- if (rel_ip < e->start_offset)
- hi = mid;
- else if (rel_ip >= e->end_offset)
- lo = mid + 1;
- else
- break;
- }
- if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
- return NULL;
- return e;
-}
-
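lookup() is a plain binary search over entries sorted by address, with half-open [start_offset, end_offset) ranges. Note that it relies on registered tables never being empty (unw_add_unwind_table() below refuses to register one); otherwise the post-loop check would dereference a NULL `e`. The same search, as a hedged standalone sketch that instead returns from inside the loop:

struct range { unsigned long start, end; };	/* half-open: [start, end) */

static const struct range *
range_lookup(const struct range *tbl, unsigned long n, unsigned long ip)
{
	unsigned long lo = 0, hi = n, mid;

	while (lo < hi) {
		mid = (lo + hi) / 2;
		if (ip < tbl[mid].start)
			hi = mid;
		else if (ip >= tbl[mid].end)
			lo = mid + 1;
		else
			return &tbl[mid];	/* ip falls inside [start, end) */
	}
	return NULL;				/* no covering entry (handles n == 0, too) */
}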
-/*
- * Build an unwind script that unwinds from the state described by INFO
- * back to the caller of the function that created that state.
- */
-static inline struct unw_script *
-build_script (struct unw_frame_info *info)
-{
- const struct unw_table_entry *e = NULL;
- struct unw_script *script = NULL;
- struct unw_labeled_state *ls, *next;
- unsigned long ip = info->ip;
- struct unw_state_record sr;
- struct unw_table *table, *prev;
- struct unw_reg_info *r;
- struct unw_insn insn;
- u8 *dp, *desc_end;
- u64 hdr;
- int i;
- STAT(unsigned long start, parse_start;)
-
- STAT(++unw.stat.script.builds; start = ia64_get_itc());
-
- /* build state record */
- memset(&sr, 0, sizeof(sr));
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
- r->when = UNW_WHEN_NEVER;
- sr.pr_val = info->pr;
-
- UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
- script = script_new(ip);
- if (!script) {
- UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return NULL;
- }
- unw.cache[info->prev_script].hint = script - unw.cache;
-
- /* search the kernel's and the modules' unwind tables for IP: */
-
- STAT(parse_start = ia64_get_itc());
-
- prev = NULL;
- for (table = unw.tables; table; table = table->next) {
- if (ip >= table->start && ip < table->end) {
- /*
- * Leave the kernel unwind table at the very front,
- * lest moving it breaks some assumption elsewhere.
- * Otherwise, move the matching table to the second
- * position in the list so that traversals can benefit
- * from commonality in backtrace paths.
- */
- if (prev && prev != unw.tables) {
- /* unw is safe - we're already spinlocked */
- prev->next = table->next;
- table->next = unw.tables->next;
- unw.tables->next = table;
- }
- e = lookup(table, ip - table->segment_base);
- break;
- }
- prev = table;
- }
- if (!e) {
- /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
- UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
- __func__, ip, unw.cache[info->prev_script].ip);
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = 0;
- compile_reg(&sr, UNW_REG_RP, script);
- script_finalize(script, &sr);
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return script;
- }
-
- sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
- + (ip & 0xfUL));
- hdr = *(u64 *) (table->segment_base + e->info_offset);
- dp = (u8 *) (table->segment_base + e->info_offset + 8);
- desc_end = dp + 8*UNW_LENGTH(hdr);
-
- while (!sr.done && dp < desc_end)
- dp = unw_decode(dp, sr.in_body, &sr);
-
- if (sr.when_target > sr.epilogue_start) {
- /*
- * sp has been restored and all values on the memory stack below
- * psp also have been restored.
- */
- sr.curr.reg[UNW_REG_PSP].val = 0;
- sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
- sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
- if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
- || r->where == UNW_WHERE_SPREL)
- {
- r->val = 0;
- r->where = UNW_WHERE_NONE;
- r->when = UNW_WHEN_NEVER;
- }
- }
-
- script->flags = sr.flags;
-
- /*
- * If RP didn't get saved, generate an entry for the return link
- * register.
- */
- if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
- UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
- __func__, ip, sr.curr.reg[UNW_REG_RP].where,
- sr.curr.reg[UNW_REG_RP].val);
- }
-
-#ifdef UNW_DEBUG
- UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
- __func__, table->segment_base + e->start_offset, sr.when_target);
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
- if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
- UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
- switch (r->where) {
- case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
- case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
- case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
- case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
- case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
- case UNW_WHERE_NONE:
- UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
- break;
-
- default:
- UNW_DPRINT(1, "BADWHERE(%d)", r->where);
- break;
- }
- UNW_DPRINT(1, "\t\t%d\n", r->when);
- }
- }
-#endif
-
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
-
- /* translate state record into unwinder instructions: */
-
- /*
- * First, set psp if we're dealing with a fixed-size frame;
- * subsequent instructions may depend on this value.
- */
- if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
- && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
- && sr.curr.reg[UNW_REG_PSP].val != 0) {
- /* new psp is sp plus frame size */
- insn.opc = UNW_INSN_ADD;
- insn.dst = offsetof(struct unw_frame_info, psp)/8;
- insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
- script_emit(script, insn);
- }
-
- /* determine where the primary UNaT is: */
- if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
- i = UNW_REG_PRI_UNAT_GR;
- else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else
- i = UNW_REG_PRI_UNAT_GR;
-
- compile_reg(&sr, i, script);
-
- for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
- compile_reg(&sr, i, script);
-
- /* free labeled register states & stack: */
-
- STAT(parse_start = ia64_get_itc());
- for (ls = sr.labeled_states; ls; ls = next) {
- next = ls->next;
- free_state_stack(&ls->saved_state);
- free_labeled_state(ls);
- }
- free_state_stack(&sr.curr);
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
-
- script_finalize(script, &sr);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return script;
-}
-
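The `sr.when_target` computation above converts a byte address into an instruction-slot count relative to the function entry: IA-64 bundles are 16 bytes, hold three slots, and the low 4 bits of an unwind ip encode the slot number. A worked sketch (hypothetical function name; `func_start` must be the 16-byte-aligned function entry, as `table->segment_base + e->start_offset` is here):

static unsigned long slot_count(unsigned long ip, unsigned long func_start)
{
	/* e.g. func_start = 0x1000, ip = 0x1021 (third bundle, slot 1):
	 * 3*((0x1020 - 0x1000)/16) + 1 = 3*2 + 1 = 7 slots into the function. */
	return 3*((ip & ~0xfUL) - func_start)/16 + (ip & 0xfUL);
}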
-/*
- * Apply the unwinding actions in SCRIPT and update STATE to reflect
- * the machine state that existed upon entry to the function that the
- * script was built for.
- */
-static inline void
-run_script (struct unw_script *script, struct unw_frame_info *state)
-{
- struct unw_insn *ip, *limit, next_insn;
- unsigned long opc, dst, val, off;
- unsigned long *s = (unsigned long *) state;
- STAT(unsigned long start;)
-
- STAT(++unw.stat.script.runs; start = ia64_get_itc());
- state->flags = script->flags;
- ip = script->insn;
- limit = script->insn + script->count;
- next_insn = *ip;
-
- while (ip++ < limit) {
- opc = next_insn.opc;
- dst = next_insn.dst;
- val = next_insn.val;
- next_insn = *ip;
-
- redo:
- switch (opc) {
- case UNW_INSN_ADD:
- s[dst] += val;
- break;
-
- case UNW_INSN_MOVE2:
- if (!s[val])
- goto lazy_init;
- s[dst+1] = s[val+1];
- s[dst] = s[val];
- break;
-
- case UNW_INSN_MOVE:
- if (!s[val])
- goto lazy_init;
- s[dst] = s[val];
- break;
-
- case UNW_INSN_MOVE_SCRATCH:
- if (state->pt) {
- s[dst] = (unsigned long) get_scratch_regs(state) + val;
- } else {
- s[dst] = 0;
- UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
- __func__, dst, val);
- }
- break;
-
- case UNW_INSN_MOVE_CONST:
- if (val == 0)
- s[dst] = (unsigned long) &unw.r0;
- else {
- s[dst] = 0;
- UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
- __func__, val);
- }
- break;
-
-
- case UNW_INSN_MOVE_STACKED:
- s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
- val);
- break;
-
- case UNW_INSN_ADD_PSP:
- s[dst] = state->psp + val;
- break;
-
- case UNW_INSN_ADD_SP:
- s[dst] = state->sp + val;
- break;
-
- case UNW_INSN_SETNAT_MEMSTK:
- if (!state->pri_unat_loc)
- state->pri_unat_loc = &state->sw->caller_unat;
- /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
- s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
- break;
-
- case UNW_INSN_SETNAT_TYPE:
- s[dst+1] = val;
- break;
-
- case UNW_INSN_LOAD:
-#ifdef UNW_DEBUG
- if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
- || s[val] < TASK_SIZE)
- {
- UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
- __func__, s[val]);
- break;
- }
-#endif
- s[dst] = *(unsigned long *) s[val];
- break;
- }
- }
- STAT(unw.stat.script.run_time += ia64_get_itc() - start);
- return;
-
- lazy_init:
- off = unw.sw_off[val];
- s[val] = (unsigned long) state->sw + off;
- if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
- /*
- * We're initializing a general register: init NaT info, too. Note that
- * the offset is a multiple of 8 which gives us the 3 bits needed for
- * the type field.
- */
- s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
- goto redo;
-}
-
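run_script() is a small switch-dispatched interpreter; the key idiom is overlaying the frame-info structure with an unsigned-long array so that each compiled instruction can address any 8-byte field by word index (the indices were baked in via unw.preg_index[] at compile time). A reduced sketch of that idiom, with a hypothetical two-opcode instruction set:

struct frame { unsigned long ip, sp, psp, rp_val; };

enum opcode { OP_ADD, OP_MOVE };

struct mini_insn {
	unsigned int opc : 4;
	unsigned int dst : 9;	/* word index into the overlaid struct */
	signed   int val : 19;
};

static void run(struct frame *f, const struct mini_insn *prog, int count)
{
	unsigned long *s = (unsigned long *) f;	/* word-indexed view of *f */
	int i;

	for (i = 0; i < count; ++i)
		switch (prog[i].opc) {
		case OP_ADD:  s[prog[i].dst] += prog[i].val;    break;
		case OP_MOVE: s[prog[i].dst]  = s[prog[i].val]; break;
		}
}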
-static int
-find_save_locs (struct unw_frame_info *info)
-{
- int have_write_lock = 0;
- struct unw_script *scr;
- unsigned long flags = 0;
-
- if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
- /* don't let obviously bad addresses pollute the cache */
- /* FIXME: should really be level 0 but it occurs too often. KAO */
- UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
- info->rp_loc = NULL;
- return -1;
- }
-
- scr = script_lookup(info);
- if (!scr) {
- spin_lock_irqsave(&unw.lock, flags);
- scr = build_script(info);
- if (!scr) {
- spin_unlock_irqrestore(&unw.lock, flags);
- UNW_DPRINT(0,
- "unwind.%s: failed to locate/build unwind script for ip %lx\n",
- __func__, info->ip);
- return -1;
- }
- have_write_lock = 1;
- }
- info->hint = scr->hint;
- info->prev_script = scr - unw.cache;
-
- run_script(scr, info);
-
- if (have_write_lock) {
- write_unlock(&scr->lock);
- spin_unlock_irqrestore(&unw.lock, flags);
- } else
- read_unlock(&scr->lock);
- return 0;
-}
-
-static int
-unw_valid(const struct unw_frame_info *info, unsigned long *p)
-{
- unsigned long loc = (unsigned long)p;
- return (loc >= info->regstk.limit && loc < info->regstk.top) ||
- (loc >= info->memstk.top && loc < info->memstk.limit);
-}
-
-int
-unw_unwind (struct unw_frame_info *info)
-{
- unsigned long prev_ip, prev_sp, prev_bsp;
- unsigned long ip, pr, num_regs;
- STAT(unsigned long start, flags;)
- int retval;
-
- STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
-
- prev_ip = info->ip;
- prev_sp = info->sp;
- prev_bsp = info->bsp;
-
- /* validate the return IP pointer */
- if (!unw_valid(info, info->rp_loc)) {
- /* FIXME: should really be level 0 but it occurs too often. KAO */
- UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
- __func__, info->ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
- /* restore the ip */
- ip = info->ip = *info->rp_loc;
- if (ip < GATE_ADDR) {
- UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* validate the previous stack frame pointer */
- if (!unw_valid(info, info->pfs_loc)) {
- UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
- /* restore the cfm: */
- info->cfm_loc = info->pfs_loc;
-
- /* restore the bsp: */
- pr = info->pr;
- num_regs = 0;
- if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
- info->pt = info->sp + 16;
- if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
- num_regs = *info->cfm_loc & 0x7f; /* size of frame */
- info->pfs_loc =
- (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
- UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
- } else
- num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
- info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
- if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
- UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __func__, info->bsp, info->regstk.limit, info->regstk.top);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* restore the sp: */
- info->sp = info->psp;
- if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
- UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __func__, info->sp, info->memstk.top, info->memstk.limit);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
- UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
- __func__, ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* as we unwind, the saved ar.unat becomes the primary unat: */
- info->pri_unat_loc = info->unat_loc;
-
- /* finally, restore the predicates: */
- unw_get_pr(info, &info->pr);
-
- retval = find_save_locs(info);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return retval;
-}
-EXPORT_SYMBOL(unw_unwind);
-
-int
-unw_unwind_to_user (struct unw_frame_info *info)
-{
- unsigned long ip, sp, pr = info->pr;
-
- do {
- unw_get_sp(info, &sp);
- if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
- < IA64_PT_REGS_SIZE) {
- UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
- __func__);
- break;
- }
- if (unw_is_intr_frame(info) &&
- (pr & (1UL << PRED_USER_STACK)))
- return 0;
- if (unw_get_pr (info, &pr) < 0) {
- unw_get_rp(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to read "
- "predicate register (ip=0x%lx)\n",
- __func__, ip);
- return -1;
- }
- } while (unw_unwind(info) >= 0);
- unw_get_ip(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
- __func__, ip);
- return -1;
-}
-EXPORT_SYMBOL(unw_unwind_to_user);
-
-static void
-init_frame_info (struct unw_frame_info *info, struct task_struct *t,
- struct switch_stack *sw, unsigned long stktop)
-{
- unsigned long rbslimit, rbstop, stklimit;
- STAT(unsigned long start, flags;)
-
- STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
-
- /*
- * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
- * don't want to do that because it would be slow as each preserved register would
- * have to be processed. Instead, what we do here is zero out the frame info and
- * start the unwind process at the function that created the switch_stack frame.
- * When a preserved value in switch_stack needs to be accessed, run_script() will
- * initialize the appropriate pointer on demand.
- */
- memset(info, 0, sizeof(*info));
-
- rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
- stklimit = (unsigned long) t + IA64_STK_OFFSET;
-
- rbstop = sw->ar_bspstore;
- if (rbstop > stklimit || rbstop < rbslimit)
- rbstop = rbslimit;
-
- if (stktop <= rbstop)
- stktop = rbstop;
- if (stktop > stklimit)
- stktop = stklimit;
-
- info->regstk.limit = rbslimit;
- info->regstk.top = rbstop;
- info->memstk.limit = stklimit;
- info->memstk.top = stktop;
- info->task = t;
- info->sw = sw;
- info->sp = info->psp = stktop;
- info->pr = sw->pr;
- UNW_DPRINT(3, "unwind.%s:\n"
- " task 0x%lx\n"
- " rbs = [0x%lx-0x%lx)\n"
- " stk = [0x%lx-0x%lx)\n"
- " pr 0x%lx\n"
- " sw 0x%lx\n"
- " sp 0x%lx\n",
- __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
- info->pr, (unsigned long) info->sw, info->sp);
- STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
-}
-
-void
-unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
-{
- unsigned long sol;
-
- init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
- info->cfm_loc = &sw->ar_pfs;
- sol = (*info->cfm_loc >> 7) & 0x7f;
- info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
- info->ip = sw->b0;
- UNW_DPRINT(3, "unwind.%s:\n"
- " bsp 0x%lx\n"
- " sol 0x%lx\n"
- " ip 0x%lx\n",
- __func__, info->bsp, sol, info->ip);
- find_save_locs(info);
-}
-
-EXPORT_SYMBOL(unw_init_frame_info);
-
-void
-unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
-{
- struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
-
- UNW_DPRINT(1, "unwind.%s\n", __func__);
- unw_init_frame_info(info, t, sw);
-}
-EXPORT_SYMBOL(unw_init_from_blocked_task);
-
-static void
-init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
- unsigned long gp, const void *table_start, const void *table_end)
-{
- const struct unw_table_entry *start = table_start, *end = table_end;
-
- table->name = name;
- table->segment_base = segment_base;
- table->gp = gp;
- table->start = segment_base + start[0].start_offset;
- table->end = segment_base + end[-1].end_offset;
- table->array = start;
- table->length = end - start;
-}
-
-void *
-unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
- const void *table_start, const void *table_end)
-{
- const struct unw_table_entry *start = table_start, *end = table_end;
- struct unw_table *table;
- unsigned long flags;
-
- if (end - start <= 0) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
- __func__);
- return NULL;
- }
-
- table = kmalloc(sizeof(*table), GFP_USER);
- if (!table)
- return NULL;
-
- init_unwind_table(table, name, segment_base, gp, table_start, table_end);
-
- spin_lock_irqsave(&unw.lock, flags);
- {
- /* keep kernel unwind table at the front (it's searched most commonly): */
- table->next = unw.tables->next;
- unw.tables->next = table;
- }
- spin_unlock_irqrestore(&unw.lock, flags);
-
- return table;
-}
-
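The module loader is the main client of this call and of unw_remove_unwind_table() just below. A hedged sketch of the expected pairing (the carrier struct and function names are hypothetical; see module.c for the real call sites):

struct mod_unwind {			/* hypothetical carrier for one module's table */
	const char *name;
	unsigned long segment_base, gp;
	const void *unw_start, *unw_end;
	void *handle;
};

static int register_module_unwind(struct mod_unwind *m)
{
	m->handle = unw_add_unwind_table(m->name, m->segment_base, m->gp,
					 m->unw_start, m->unw_end);
	return m->handle ? 0 : -1;	/* NULL: empty table or kmalloc failure */
}

static void unregister_module_unwind(struct mod_unwind *m)
{
	if (m->handle)
		unw_remove_unwind_table(m->handle);
}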
-void
-unw_remove_unwind_table (void *handle)
-{
- struct unw_table *table, *prev;
- struct unw_script *tmp;
- unsigned long flags;
- long index;
-
- if (!handle) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
- __func__);
- return;
- }
-
- table = handle;
- if (table == &unw.kernel_table) {
- UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
- "no-can-do!\n", __func__);
- return;
- }
-
- spin_lock_irqsave(&unw.lock, flags);
- {
- /* first, delete the table: */
-
- for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
- if (prev->next == table)
- break;
- if (!prev) {
- UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
- __func__, (void *) table);
- spin_unlock_irqrestore(&unw.lock, flags);
- return;
- }
- prev->next = table->next;
- }
- spin_unlock_irqrestore(&unw.lock, flags);
-
- /* next, remove hash table entries for this table */
-
- for (index = 0; index < UNW_HASH_SIZE; ++index) {
- tmp = unw.cache + unw.hash[index];
- if (unw.hash[index] >= UNW_CACHE_SIZE
- || tmp->ip < table->start || tmp->ip >= table->end)
- continue;
-
- write_lock(&tmp->lock);
- {
- if (tmp->ip >= table->start && tmp->ip < table->end) {
- unw.hash[index] = tmp->coll_chain;
- tmp->ip = 0;
- }
- }
- write_unlock(&tmp->lock);
- }
-
- kfree(table);
-}
-
-static int __init
-create_gate_table (void)
-{
- const struct unw_table_entry *entry, *start, *end;
- unsigned long *lp, segbase = GATE_ADDR;
- size_t info_size, size;
- char *info;
- Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
- int i;
-
- for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
- if (phdr->p_type == PT_IA_64_UNWIND) {
- punw = phdr;
- break;
- }
-
- if (!punw) {
- printk("%s: failed to find gate DSO's unwind table!\n", __func__);
- return 0;
- }
-
- start = (const struct unw_table_entry *) punw->p_vaddr;
- end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
- size = 0;
-
- unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
-
- for (entry = start; entry < end; ++entry)
- size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
- size += 8; /* reserve space for "end of table" marker */
-
- unw.gate_table = kmalloc(size, GFP_KERNEL);
- if (!unw.gate_table) {
- unw.gate_table_size = 0;
- printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
- return 0;
- }
- unw.gate_table_size = size;
-
- lp = unw.gate_table;
- info = (char *) unw.gate_table + size;
-
- for (entry = start; entry < end; ++entry, lp += 3) {
- info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
- info -= info_size;
- memcpy(info, (char *) segbase + entry->info_offset, info_size);
-
- lp[0] = segbase + entry->start_offset; /* start */
- lp[1] = segbase + entry->end_offset; /* end */
- lp[2] = info - (char *) unw.gate_table; /* info */
- }
- *lp = 0; /* end-of-table marker */
- return 0;
-}
-
-__initcall(create_gate_table);
-
-void __init
-unw_init (void)
-{
- extern char __gp[];
- extern void unw_hash_index_t_is_too_narrow (void);
- long i, off;
-
- if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
- unw_hash_index_t_is_too_narrow();
-
- unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
- unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
- unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
- unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
- unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
- unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
- unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
- unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
- for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
- unw.sw_off[unw.preg_index[i]] = off;
-
- for (i = 0; i < UNW_CACHE_SIZE; ++i) {
- if (i > 0)
- unw.cache[i].lru_chain = (i - 1);
- unw.cache[i].coll_chain = -1;
- rwlock_init(&unw.cache[i].lock);
- }
- unw.lru_head = UNW_CACHE_SIZE - 1;
- unw.lru_tail = 0;
-
- init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
- __start_unwind, __end_unwind);
-}
-
-/*
- * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
- *
- * This system call has been deprecated. The new and improved way to get
- * at the kernel's unwind info is via the gate DSO. The address of the
- * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
- *
- * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
- *
- * This system call copies the unwind data into the buffer pointed to by BUF and returns
- * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
- * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
- * unwind data.
- *
- * The first portion of the unwind data contains an unwind table and the rest contains the
- * associated unwind info (in no particular order). The unwind table consists of a table
- * of entries of the form:
- *
- * u64 start; (64-bit address of start of function)
- * u64 end; (64-bit address of end of function)
- * u64 info; (BUF-relative offset to unwind info)
- *
- * The end of the unwind table is indicated by an entry with a START address of zero.
- *
- * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
- * on the format of the unwind info.
- *
- * ERRORS
- * EFAULT BUF points outside your accessible address space.
- */
-asmlinkage long
-sys_getunwind (void __user *buf, size_t buf_size)
-{
- if (buf && buf_size >= unw.gate_table_size)
- if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
- return -EFAULT;
- return unw.gate_table_size;
-}
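User-space consumption of the deprecated call follows the size-probe pattern documented above: call once with a NULL buffer to learn the size, then again with a buffer at least that large. A hedged user-space sketch (assuming __NR_getunwind is defined, as it is on ia64):

#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

static void *fetch_unwind_data(size_t *sizep)
{
	long size = syscall(__NR_getunwind, NULL, 0);	/* probe for the size */
	void *buf;

	if (size <= 0)
		return NULL;
	buf = malloc(size);
	if (!buf)
		return NULL;
	if (syscall(__NR_getunwind, buf, (size_t) size) != size) {
		free(buf);
		return NULL;
	}
	*sizep = size;
	return buf;
}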
diff --git a/arch/ia64/kernel/unwind_decoder.c b/arch/ia64/kernel/unwind_decoder.c
deleted file mode 100644
index 83f54f7929..0000000000
--- a/arch/ia64/kernel/unwind_decoder.c
+++ /dev/null
@@ -1,460 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Generic IA-64 unwind info decoder.
- *
- * This file is used both by the Linux kernel and objdump. Please keep
- * the two copies of this file in sync.
- *
- * You need to customize the decoder by defining the following
- * macros/constants before including this file:
- *
- * Types:
- * unw_word Unsigned integer type with at least 64 bits
- *
- * Register names:
- * UNW_REG_BSP
- * UNW_REG_BSPSTORE
- * UNW_REG_FPSR
- * UNW_REG_LC
- * UNW_REG_PFS
- * UNW_REG_PR
- * UNW_REG_RNAT
- * UNW_REG_PSP
- * UNW_REG_RP
- * UNW_REG_UNAT
- *
- * Decoder action macros:
- * UNW_DEC_BAD_CODE(code)
- * UNW_DEC_ABI(fmt,abi,context,arg)
- * UNW_DEC_BR_GR(fmt,brmask,gr,arg)
- * UNW_DEC_BR_MEM(fmt,brmask,arg)
- * UNW_DEC_COPY_STATE(fmt,label,arg)
- * UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
- * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
- * UNW_DEC_FR_MEM(fmt,frmask,arg)
- * UNW_DEC_GR_GR(fmt,grmask,gr,arg)
- * UNW_DEC_GR_MEM(fmt,grmask,arg)
- * UNW_DEC_LABEL_STATE(fmt,label,arg)
- * UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
- * UNW_DEC_MEM_STACK_V(fmt,t,arg)
- * UNW_DEC_PRIUNAT_GR(fmt,r,arg)
- * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
- * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
- * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
- * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
- * UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
- * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
- * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
- * UNW_DEC_REG_REG(fmt,src,dst,arg)
- * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
- * UNW_DEC_REG_WHEN(fmt,reg,t,arg)
- * UNW_DEC_RESTORE(fmt,t,abreg,arg)
- * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
- * UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
- * UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
- * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
- * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
- * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
- * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
- * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
- * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
- */
-
-static unw_word
-unw_decode_uleb128 (unsigned char **dpp)
-{
- unsigned shift = 0;
- unw_word byte, result = 0;
- unsigned char *bp = *dpp;
-
- while (1)
- {
- byte = *bp++;
- result |= (byte & 0x7f) << shift;
- if ((byte & 0x80) == 0)
- break;
- shift += 7;
- }
- *dpp = bp;
- return result;
-}
-
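unw_decode_uleb128() reads the standard little-endian base-128 varint used throughout the descriptor formats below. For illustration only, the matching encoder (not part of the original file) is the exact inverse:

static int unw_encode_uleb128(unw_word val, unsigned char *bp)
{
	int n = 0;

	do {
		unsigned char byte = val & 0x7f;

		val >>= 7;
		if (val)
			byte |= 0x80;	/* more bytes follow */
		bp[n++] = byte;
	} while (val);
	return n;			/* number of bytes written */
}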
-static unsigned char *
-unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, abreg;
- unw_word t, off;
-
- byte1 = *dp++;
- t = unw_decode_uleb128 (&dp);
- off = unw_decode_uleb128 (&dp);
- abreg = (byte1 & 0x7f);
- if (byte1 & 0x80)
- UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
- else
- UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, abreg, x, ytreg;
- unw_word t;
-
- byte1 = *dp++; byte2 = *dp++;
- t = unw_decode_uleb128 (&dp);
- abreg = (byte1 & 0x7f);
- ytreg = byte2;
- x = (byte1 >> 7) & 1;
- if ((byte1 & 0x80) == 0 && ytreg == 0)
- UNW_DEC_RESTORE(X2, t, abreg, arg);
- else
- UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, abreg, qp;
- unw_word t, off;
-
- byte1 = *dp++; byte2 = *dp++;
- t = unw_decode_uleb128 (&dp);
- off = unw_decode_uleb128 (&dp);
-
- qp = (byte1 & 0x3f);
- abreg = (byte2 & 0x7f);
-
- if (byte1 & 0x80)
- UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
- else
- UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
- unw_word t;
-
- byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
- t = unw_decode_uleb128 (&dp);
-
- qp = (byte1 & 0x3f);
- abreg = (byte2 & 0x7f);
- x = (byte2 >> 7) & 1;
- ytreg = byte3;
-
- if ((byte2 & 0x80) == 0 && byte3 == 0)
- UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
- else
- UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
-{
- int body = (code & 0x20) != 0;
- unw_word rlen;
-
- rlen = (code & 0x1f);
- UNW_DEC_PROLOGUE(R1, body, rlen, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, mask, grsave;
- unw_word rlen;
-
- byte1 = *dp++;
-
- mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
- grsave = (byte1 & 0x7f);
- rlen = unw_decode_uleb128 (&dp);
- UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word rlen;
-
- rlen = unw_decode_uleb128 (&dp);
- UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char brmask = (code & 0x1f);
-
- UNW_DEC_BR_MEM(P1, brmask, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
-{
- if ((code & 0x10) == 0)
- {
- unsigned char byte1 = *dp++;
-
- UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
- (byte1 & 0x7f), arg);
- }
- else if ((code & 0x08) == 0)
- {
- unsigned char byte1 = *dp++, r, dst;
-
- r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
- dst = (byte1 & 0x7f);
- switch (r)
- {
- case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
- case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
- case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
- case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
- case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
- case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
- case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
- case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
- case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
- case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
- case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
- case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- else if ((code & 0x7) == 0)
- UNW_DEC_SPILL_MASK(P4, dp, arg);
- else if ((code & 0x7) == 1)
- {
- unw_word grmask, frmask, byte1, byte2, byte3;
-
- byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
- grmask = ((byte1 >> 4) & 0xf);
- frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
- UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
- }
- else
- UNW_DEC_BAD_CODE(code);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
-{
- int gregs = (code & 0x10) != 0;
- unsigned char mask = (code & 0x0f);
-
- if (gregs)
- UNW_DEC_GR_MEM(P6, mask, arg);
- else
- UNW_DEC_FR_MEM(P6, mask, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char r, byte1, byte2;
- unw_word t, size;
-
- if ((code & 0x10) == 0)
- {
- r = (code & 0xf);
- t = unw_decode_uleb128 (&dp);
- switch (r)
- {
- case 0:
- size = unw_decode_uleb128 (&dp);
- UNW_DEC_MEM_STACK_F(P7, t, size, arg);
- break;
-
- case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
- case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
- case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
- case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
- case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
- case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
- case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
- case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
- case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
- case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
- case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
- case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
- case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
- case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
- case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- else
- {
- switch (code & 0xf)
- {
- case 0x0: /* p8 */
- {
- r = *dp++;
- t = unw_decode_uleb128 (&dp);
- switch (r)
- {
- case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
- case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
- case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
- case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
- case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
- case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
- case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
- case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
- case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
- case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
- case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
- case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
- case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
- case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
- case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
- case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- break;
-
- case 0x1:
- byte1 = *dp++; byte2 = *dp++;
- UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
- break;
-
- case 0xf: /* p10 */
- byte1 = *dp++; byte2 = *dp++;
- UNW_DEC_ABI(P10, byte1, byte2, arg);
- break;
-
- case 0x9:
- return unw_decode_x1 (dp, code, arg);
-
- case 0xa:
- return unw_decode_x2 (dp, code, arg);
-
- case 0xb:
- return unw_decode_x3 (dp, code, arg);
-
- case 0xc:
- return unw_decode_x4 (dp, code, arg);
-
- default:
- UNW_DEC_BAD_CODE(code);
- break;
- }
- }
- return dp;
-}
-
-static unsigned char *
-unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word label = (code & 0x1f);
-
- if ((code & 0x20) != 0)
- UNW_DEC_COPY_STATE(B1, label, arg);
- else
- UNW_DEC_LABEL_STATE(B1, label, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word t;
-
- t = unw_decode_uleb128 (&dp);
- UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word t, ecount, label;
-
- if ((code & 0x10) == 0)
- {
- t = unw_decode_uleb128 (&dp);
- ecount = unw_decode_uleb128 (&dp);
- UNW_DEC_EPILOGUE(B3, t, ecount, arg);
- }
- else if ((code & 0x07) == 0)
- {
- label = unw_decode_uleb128 (&dp);
- if ((code & 0x08) != 0)
- UNW_DEC_COPY_STATE(B4, label, arg);
- else
- UNW_DEC_LABEL_STATE(B4, label, arg);
- }
- else
- switch (code & 0x7)
- {
- case 1: return unw_decode_x1 (dp, code, arg);
- case 2: return unw_decode_x2 (dp, code, arg);
- case 3: return unw_decode_x3 (dp, code, arg);
- case 4: return unw_decode_x4 (dp, code, arg);
- default: UNW_DEC_BAD_CODE(code); break;
- }
- return dp;
-}
-
-typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
-
-static unw_decoder unw_decode_table[2][8] =
-{
- /* prologue table: */
- {
- unw_decode_r1, /* 0 */
- unw_decode_r1,
- unw_decode_r2,
- unw_decode_r3,
- unw_decode_p1, /* 4 */
- unw_decode_p2_p5,
- unw_decode_p6,
- unw_decode_p7_p10
- },
- {
- unw_decode_r1, /* 0 */
- unw_decode_r1,
- unw_decode_r2,
- unw_decode_r3,
- unw_decode_b1, /* 4 */
- unw_decode_b1,
- unw_decode_b2,
- unw_decode_b3_x4
- }
-};
-
-/*
- * Decode one descriptor and return address of next descriptor.
- */
-static inline unsigned char *
-unw_decode (unsigned char *dp, int inside_body, void *arg)
-{
- unw_decoder decoder;
- unsigned char code;
-
- code = *dp++;
- decoder = unw_decode_table[inside_body][code >> 5];
- dp = (*decoder) (dp, code, arg);
- return dp;
-}
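The dispatch key is the top three bits of the first descriptor byte, with the prologue/body flag selecting the table row. A worked example, derived from the tables above:

/*
 * code = 0x23 (0b00100011), inside_body = 0:
 *   code >> 5   == 1      -> unw_decode_table[0][1] == unw_decode_r1
 *   code & 0x20 == 0x20   -> body = 1
 *   code & 0x1f == 3      -> rlen = 3
 * i.e. UNW_DEC_PROLOGUE(R1, 1, 3, arg): a body region covering 3 slots.
 */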
diff --git a/arch/ia64/kernel/unwind_i.h b/arch/ia64/kernel/unwind_i.h
deleted file mode 100644
index 1dd57ba443..0000000000
--- a/arch/ia64/kernel/unwind_i.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Kernel unwind support.
- */
-
-#define UNW_VER(x) ((x) >> 48)
-#define UNW_FLAG_MASK 0x0000ffff00000000
-#define UNW_FLAG_OSMASK 0x0000f00000000000
-#define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L)
-#define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L)
-#define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL)
-
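These macros carve one u64 header word into a version (bits 63..48), flags (bits 47..32), and a length in 8-byte descriptor words (bits 31..0) — which is why build_script() in unwind.c computes desc_end = dp + 8*UNW_LENGTH(hdr). A hedged helper sketch using only the macros above (the function itself is hypothetical):

static void show_unwind_header(u64 hdr)
{
	printk("unwind header: version %lu, %lu descriptor words%s%s\n",
	       (unsigned long) UNW_VER(hdr),
	       (unsigned long) UNW_LENGTH(hdr),
	       UNW_FLAG_EHANDLER(hdr) ? ", ehandler" : "",
	       UNW_FLAG_UHANDLER(hdr) ? ", uhandler" : "");
}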
-enum unw_register_index {
- /* primary unat: */
- UNW_REG_PRI_UNAT_GR,
- UNW_REG_PRI_UNAT_MEM,
-
- /* register stack */
- UNW_REG_BSP, /* register stack pointer */
- UNW_REG_BSPSTORE,
- UNW_REG_PFS, /* previous function state */
- UNW_REG_RNAT,
- /* memory stack */
- UNW_REG_PSP, /* previous memory stack pointer */
- /* return pointer: */
- UNW_REG_RP,
-
- /* preserved registers: */
- UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7,
- UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR,
- UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5,
- UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5,
- UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19,
- UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23,
- UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27,
- UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31,
- UNW_NUM_REGS
-};
-
-struct unw_info_block {
- u64 header;
- u64 desc[]; /* unwind descriptors */
- /* personality routine and language-specific data follow behind descriptors */
-};
-
-struct unw_table {
- struct unw_table *next; /* must be first member! */
- const char *name;
- unsigned long gp; /* global pointer for this load-module */
- unsigned long segment_base; /* base for offsets in the unwind table entries */
- unsigned long start;
- unsigned long end;
- const struct unw_table_entry *array;
- unsigned long length;
-};
-
-enum unw_where {
- UNW_WHERE_NONE, /* register isn't saved at all */
- UNW_WHERE_GR, /* register is saved in a general register */
- UNW_WHERE_FR, /* register is saved in a floating-point register */
- UNW_WHERE_BR, /* register is saved in a branch register */
- UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */
- UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */
- /*
- * At the end of each prologue these locations get resolved to
- * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively:
- */
- UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */
- UNW_WHERE_GR_SAVE /* register is saved in next general register */
-};
-
-#define UNW_WHEN_NEVER 0x7fffffff
-
-struct unw_reg_info {
- unsigned long val; /* save location: register number or offset */
- enum unw_where where; /* where the register gets saved */
- int when; /* when the register gets saved */
-};
-
-struct unw_reg_state {
- struct unw_reg_state *next; /* next (outer) element on state stack */
- struct unw_reg_info reg[UNW_NUM_REGS]; /* register save locations */
-};
-
-struct unw_labeled_state {
- struct unw_labeled_state *next; /* next labeled state (or NULL) */
- unsigned long label; /* label for this state */
- struct unw_reg_state saved_state;
-};
-
-struct unw_state_record {
- unsigned int first_region : 1; /* is this the first region? */
- unsigned int done : 1; /* are we done scanning descriptors? */
- unsigned int any_spills : 1; /* got any register spills? */
- unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? */
- unsigned long flags; /* see UNW_FLAG_* in unwind.h */
-
- u8 *imask; /* imask of spill_mask record or NULL */
- unsigned long pr_val; /* predicate values */
- unsigned long pr_mask; /* predicate mask */
- long spill_offset; /* psp-relative offset for spill base */
- int region_start;
- int region_len;
- int epilogue_start;
- int epilogue_count;
- int when_target;
-
- u8 gr_save_loc; /* next general register to use for saving a register */
- u8 return_link_reg; /* branch register in which the return link is passed */
-
- struct unw_labeled_state *labeled_states; /* list of all labeled states */
- struct unw_reg_state curr; /* current state */
-};
-
-enum unw_nat_type {
- UNW_NAT_NONE, /* NaT not represented */
- UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */
- UNW_NAT_MEMSTK, /* NaT value is in unat word at offset OFF */
- UNW_NAT_REGSTK /* NaT is in rnat */
-};
-
-enum unw_insn_opcode {
- UNW_INSN_ADD, /* s[dst] += val */
- UNW_INSN_ADD_PSP, /* s[dst] = (s.psp + val) */
- UNW_INSN_ADD_SP, /* s[dst] = (s.sp + val) */
- UNW_INSN_MOVE, /* s[dst] = s[val] */
- UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */
- UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */
- UNW_INSN_SETNAT_MEMSTK, /* s[dst+1].nat.type = MEMSTK;
- s[dst+1].nat.off = *s.pri_unat - s[dst] */
- UNW_INSN_SETNAT_TYPE, /* s[dst+1].nat.type = val */
- UNW_INSN_LOAD, /* s[dst] = *s[val] */
- UNW_INSN_MOVE_SCRATCH, /* s[dst] = scratch reg "val" */
- UNW_INSN_MOVE_CONST, /* s[dst] = constant reg "val" */
-};
-
-struct unw_insn {
- unsigned int opc : 4;
- unsigned int dst : 9;
- signed int val : 19;
-};
-
-/*
- * Preserved general static registers (r4-r7) give rise to two script
- * instructions; everything else yields at most one instruction; at
- * the end of the script, the psp gets popped, accounting for one more
- * instruction.
- */
-#define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5)
-
-struct unw_script {
- unsigned long ip; /* ip this script is for */
- unsigned long pr_mask; /* mask of predicates script depends on */
- unsigned long pr_val; /* predicate values this script is for */
- rwlock_t lock;
- unsigned int flags; /* see UNW_FLAG_* in unwind.h */
- unsigned short lru_chain; /* used for least-recently-used chain */
- unsigned short coll_chain; /* used for hash collisions */
- unsigned short hint; /* hint for next script to try (or -1) */
- unsigned short count; /* number of instructions in script */
- struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
-};
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
deleted file mode 100644
index 53dfde161c..0000000000
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,224 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <linux/pgtable.h>
-#include <asm/cache.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
-#define EMITS_PT_NOTE
-#define RO_EXCEPTION_TABLE_ALIGN 16
-
-#include <asm-generic/vmlinux.lds.h>
-
-OUTPUT_FORMAT("elf64-ia64-little")
-OUTPUT_ARCH(ia64)
-ENTRY(phys_start)
-jiffies = jiffies_64;
-
-PHDRS {
- text PT_LOAD;
- percpu PT_LOAD;
- data PT_LOAD;
- note PT_NOTE;
- unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
-}
-
-SECTIONS {
- /*
- * unwind exit sections must be discarded before
- * the rest of the sections get included.
- */
- /DISCARD/ : {
- *(.IA_64.unwind.exit.text)
- *(.IA_64.unwind_info.exit.text)
- *(.comment)
- *(.note)
- }
-
- v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - LOAD_OFFSET;
-
- code : {
- } :text
- . = KERNEL_START;
-
- _text = .;
- _stext = .;
-
- .text : AT(ADDR(.text) - LOAD_OFFSET) {
- __start_ivt_text = .;
- *(.text..ivt)
- __end_ivt_text = .;
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- *(.gnu.linkonce.t*)
- }
-
- .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
- *(.text2)
- }
-
-#ifdef CONFIG_SMP
- .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
- *(.text..lock)
- }
-#endif
- _etext = .;
-
- /*
- * Read-only data
- */
-
- /* MCA table */
- . = ALIGN(16);
- __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
- __start___mca_table = .;
- *(__mca_table)
- __stop___mca_table = .;
- }
-
- .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
- __start___phys_stack_reg_patchlist = .;
- *(.data..patch.phys_stack_reg)
- __end___phys_stack_reg_patchlist = .;
- }
-
- /*
- * Global data
- */
- _data = .;
-
- /* Unwind info & table: */
- . = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
- *(.IA_64.unwind_info*)
- }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
- __start_unwind = .;
- *(.IA_64.unwind*)
- __end_unwind = .;
- } :text :unwind
- code_continues2 : {
- } :text
-
- RO_DATA(4096)
-
- .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
- __start_opd = .;
- *(.opd)
- __end_opd = .;
- }
-
- /*
- * Initialization code and data:
- */
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
-
- INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(16)
-
- .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
- __start___vtop_patchlist = .;
- *(.data..patch.vtop)
- __end___vtop_patchlist = .;
- }
-
- .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
- __start___rse_patchlist = .;
- *(.data..patch.rse)
- __end___rse_patchlist = .;
- }
-
- .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
- __start___mckinley_e9_bundles = .;
- *(.data..patch.mckinley_e9)
- __end___mckinley_e9_bundles = .;
- }
-
-#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
-
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
- PAGE_ALIGNED_DATA(PAGE_SIZE)
- . = ALIGN(PAGE_SIZE);
- __start_gate_section = .;
- *(.data..gate)
- __stop_gate_section = .;
- }
- /*
- * make sure the gate page doesn't expose
- * kernel data
- */
- . = ALIGN(PAGE_SIZE);
-
- /* Per-cpu data: */
- . = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
- __phys_per_cpu_start = __per_cpu_load;
- /*
- * ensure percpu data fits
- * into percpu page size
- */
- . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
-
- data : {
- } :data
- .data : AT(ADDR(.data) - LOAD_OFFSET) {
- _sdata = .;
- INIT_TASK_DATA(PAGE_SIZE)
- CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
- READ_MOSTLY_DATA(SMP_CACHE_BYTES)
- DATA_DATA
- *(.data1)
- *(.gnu.linkonce.d*)
- CONSTRUCTORS
- }
-
- BUG_TABLE
-
- . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - LOAD_OFFSET) {
- *(.got.plt)
- *(.got)
- }
- __gp = ADDR(.got) + 0x200000;
-
- /*
- * We want the small data sections together,
- * so single-instruction offsets can access
- * them all, and initialized data all before
- * uninitialized, so we can shorten the
- * on-disk segment size.
- */
- .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
- *(.sdata)
- *(.sdata1)
- *(.srdata)
- }
- _edata = .;
-
- BSS_SECTION(0, 0, 0)
-
- _end = .;
-
- code : {
- } :text
-
- STABS_DEBUG
- DWARF_DEBUG
- ELF_DETAILS
-
- /* Default discards */
- DISCARDS
-}