path: root/arch/mips/sgi-ip27
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree 848558de17fb3008cdf4d861b01ac7781903ce39 /arch/mips/sgi-ip27
parent Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/sgi-ip27')
-rw-r--r-- arch/mips/sgi-ip27/Kconfig | 40
-rw-r--r-- arch/mips/sgi-ip27/Makefile | 11
-rw-r--r-- arch/mips/sgi-ip27/Platform | 16
-rw-r--r-- arch/mips/sgi-ip27/ip27-berr.c | 96
-rw-r--r-- arch/mips/sgi-ip27/ip27-common.h | 20
-rw-r--r-- arch/mips/sgi-ip27/ip27-console.c | 42
-rw-r--r-- arch/mips/sgi-ip27/ip27-hubio.c | 185
-rw-r--r-- arch/mips/sgi-ip27/ip27-init.c | 147
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 311
-rw-r--r-- arch/mips/sgi-ip27/ip27-klconfig.c | 74
-rw-r--r-- arch/mips/sgi-ip27/ip27-klnuma.c | 129
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 434
-rw-r--r-- arch/mips/sgi-ip27/ip27-nmi.c | 240
-rw-r--r-- arch/mips/sgi-ip27/ip27-reset.c | 81
-rw-r--r-- arch/mips/sgi-ip27/ip27-smp.c | 189
-rw-r--r-- arch/mips/sgi-ip27/ip27-timer.c | 160
-rw-r--r-- arch/mips/sgi-ip27/ip27-xtalk.c | 242
17 files changed, 2417 insertions, 0 deletions
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
new file mode 100644
index 000000000..e5b6cadbe
--- /dev/null
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+choice
+ prompt "Node addressing mode"
+ depends on SGI_IP27
+ default SGI_SN_M_MODE
+
+config SGI_SN_M_MODE
+ bool "IP27 M-Mode"
+ help
+ The nodes of Origin, Onyx, Fuel and Tezro systems can be configured
+ in either N-Mode, which allows for more nodes, or M-Mode, which
+ allows for more memory. Your hardware is almost certainly running in
+ M-Mode, so choose M-Mode here.
+
+config SGI_SN_N_MODE
+ bool "IP27 N-Mode"
+ help
+ The nodes of Origin, Onyx, Fuel and Tezro systems can be configured
+ in either N-Mode, which allows for more nodes, or M-Mode, which
+ allows for more memory. Your hardware is almost certainly running in
+ M-Mode, so only select N-Mode if you are sure your system uses it.
+
+endchoice
+
+config MAPPED_KERNEL
+ bool "Mapped kernel support"
+ depends on SGI_IP27
+ help
+ Change the way a Linux kernel is loaded into memory on a MIPS64
+ machine. This is required in order to support text replication on
+ NUMA. If you need to understand it, read the source code.
+
+config REPLICATE_KTEXT
+ bool "Kernel text replication support"
+ depends on SGI_IP27
+ select MAPPED_KERNEL
+ help
+ Say Y here to enable replicating the kernel text across multiple
+ nodes in a NUMA cluster. This trades memory for speed.
+
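For reference, a typical Origin configuration fragment resulting from these options might look as follows (illustrative values only, not part of this patch):

	CONFIG_SGI_IP27=y
	CONFIG_SGI_SN_M_MODE=y
	# CONFIG_SGI_SN_N_MODE is not set
	CONFIG_MAPPED_KERNEL=y
	CONFIG_REPLICATE_KTEXT=y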
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
new file mode 100644
index 000000000..27c14ede1
--- /dev/null
+++ b/arch/mips/sgi-ip27/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the IP27 specific kernel interface routines under Linux.
+#
+
+obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
+ ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
+ ip27-hubio.o ip27-xtalk.o
+
+obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o
+obj-$(CONFIG_SMP) += ip27-smp.o
diff --git a/arch/mips/sgi-ip27/Platform b/arch/mips/sgi-ip27/Platform
new file mode 100644
index 000000000..e734ee6ab
--- /dev/null
+++ b/arch/mips/sgi-ip27/Platform
@@ -0,0 +1,16 @@
+#
+# SGI-IP27 (Origin200/2000)
+#
+# Set the load address to >= 0xc000000000300000 if you want to leave space for
+# symmon, 0xc00000000001c000 for production kernels. Note that the value must
+# be 16kb aligned or the handling of the current variable will break.
+#
+cflags-$(CONFIG_SGI_IP27) += -I$(srctree)/arch/mips/include/asm/mach-ip27
+ifdef CONFIG_MAPPED_KERNEL
+load-$(CONFIG_SGI_IP27) += 0xc00000004001c000
+OBJCOPYFLAGS := --change-addresses=0x3fffffff80000000
+dataoffset-$(CONFIG_SGI_IP27) += 0x01000000
+else
+load-$(CONFIG_SGI_IP27) += 0xa80000000001c000
+OBJCOPYFLAGS := --change-addresses=0x57ffffff80000000
+endif
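The 16kb alignment requirement mentioned above stems from the classic MIPS trick of deriving the current thread_info pointer by masking the kernel stack pointer with the stack size. A minimal sketch of the idea, assuming a 16 KB THREAD_SIZE (illustrative only, not taken from this patch):

	/* thread_info lives at the bottom of the kernel stack, so it can be
	 * recovered by rounding the stack pointer down to a THREAD_SIZE
	 * boundary; a load address that is not 16 KB aligned breaks this. */
	#define THREAD_SIZE	(16UL * 1024)

	static inline unsigned long thread_info_from_sp(unsigned long sp)
	{
		return sp & ~(THREAD_SIZE - 1UL);
	}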
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
new file mode 100644
index 000000000..923a63a51
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/signal.h> /* for SIGBUS */
+#include <linux/sched.h> /* show_regs(), force_sig() */
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+
+#include <asm/ptrace.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+static void dump_hub_information(unsigned long errst0, unsigned long errst1)
+{
+ static char *err_type[2][8] = {
+ { NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
+ NULL, NULL, NULL, NULL },
+ { "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
+ NULL, NULL, NULL, NULL }
+ };
+ union pi_err_stat0 st0;
+ union pi_err_stat1 st1;
+
+ st0.pi_stat0_word = errst0;
+ st1.pi_stat1_word = errst1;
+
+ if (!st0.pi_stat0_fmt.s0_valid) {
+ pr_info("Hub does not contain valid error information\n");
+ return;
+ }
+
+ pr_info("Hub has valid error information:\n");
+ if (st0.pi_stat0_fmt.s0_ovr_run)
+ pr_info("Overrun is set. Error stack may contain additional "
+ "information.\n");
+ pr_info("Hub error address is %08lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_addr);
+ pr_info("Incoming message command 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_cmd);
+ pr_info("Supplemental field of incoming message is 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_supl);
+ pr_info("T5 Rn (for RRB only) is 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_t5_req);
+ pr_info("Error type is %s\n", err_type[st1.pi_stat1_fmt.s1_rw_rb]
+ [st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
+}
+
+int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ unsigned long errst0, errst1;
+ int data = regs->cp0_cause & 4;
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+
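+ /*
+ * do_be() calls this handler with is_fixup set when an exception
+ * table fixup exists for the faulting instruction; returning
+ * MIPS_BE_FIXUP lets the generic code apply that fixup instead of
+ * treating the bus error as fatal.
+ */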
+ if (is_fixup)
+ return MIPS_BE_FIXUP;
+
+ printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
+ regs->cp0_epc);
+ printk("Hub information:\n");
+ printk("ERR_INT_PEND = 0x%06llx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
+ errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
+ errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
+ dump_hub_information(errst0, errst1);
+ show_regs(regs);
+ dump_tlb_all();
+ while(1);
+ force_sig(SIGBUS);
+}
+
+void __init ip27_be_init(void)
+{
+ /* XXX Initialize all the Hub & Bridge error handling here. */
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+ int cpuoff = cpu << 8;
+
+ mips_set_be_handler(ip27_be_handler);
+
+ LOCAL_HUB_S(PI_ERR_INT_PEND,
+ cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+ LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
+ LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
+}
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
new file mode 100644
index 000000000..ed008a084
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP27_COMMON_H
+#define __IP27_COMMON_H
+
+extern nasid_t master_nasid;
+
+extern void cpu_node_probe(void);
+extern void hub_rt_clock_event_init(void);
+extern void hub_rtc_init(nasid_t nasid);
+extern void install_cpu_nmi_handler(int slice);
+extern void install_ipi(void);
+extern void ip27_reboot_setup(void);
+extern const struct plat_smp_ops ip27_smp_ops;
+extern unsigned long node_getfirstfree(nasid_t nasid);
+extern void per_cpu_init(void);
+extern void replicate_kernel_text(void);
+extern void setup_replication_mask(void);
+
+#endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
new file mode 100644
index 000000000..7737a88c6
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2002 Ralf Baechle
+ */
+
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include "ip27-common.h"
+
+#define IOC3_CLK (22000000 / 3)
+#define IOC3_FLAGS (0)
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+ nasid_t nasid;
+
+ nasid = (master_nasid == INVALID_NASID) ? get_nasid() : master_nasid;
+ ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(nasid)->memory_base;
+
+ return &ioc3->sregs.uarta;
+}
+
+void prom_putchar(char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
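+ /* spin until the transmit holding register is empty (LSR bit 0x20, THRE) */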
+ while ((readb(&uart->iu_lsr) & 0x20) == 0)
+ ;
+ writeb(c, &uart->iu_thr);
+}
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
new file mode 100644
index 000000000..8352eb640
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ *
+ * Support functions for the HUB ASIC - mostly PIO mapping related.
+ */
+
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mmzone.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/io.h>
+#include <asm/xtalk/xtalk.h>
+
+
+static int force_fire_and_forget = 1;
+
+/**
+ * hub_pio_map - establish a HUB PIO mapping
+ *
+ * @nasid: nasid of the hub to establish the PIO mapping on
+ * @widget: widget ID to perform PIO mapping for
+ * @xtalk_addr: xtalk address that needs to be mapped
+ * @size: size of the PIO mapping
+ *
+ **/
+unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
+ unsigned long xtalk_addr, size_t size)
+{
+ unsigned i;
+
+ /* use small-window mapping if possible */
+ if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
+ return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
+
+ if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
+ printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
+ " too big (%ld)\n",
+ nasid, widget, xtalk_addr, size);
+ return 0;
+ }
+
+ xtalk_addr &= ~(BWIN_SIZE-1);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
+ if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
+ continue;
+
+ /*
+ * The code below does a PIO write to setup an ITTE entry.
+ *
+ * We need to prevent other CPUs from seeing our updated
+ * memory shadow of the ITTE (in the piomap) until the ITTE
+ * entry is actually set up; otherwise, another CPU might
+ * attempt a PIO prematurely.
+ *
+ * Also, the only way we can know that an entry has been
+ * received by the hub and can be used by future PIO reads/
+ * writes is by reading back the ITTE entry after writing it.
+ *
+ * For these two reasons, we PIO read back the ITTE entry
+ * after we write it.
+ */
+ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
+ __raw_readq(IIO_ITTE_GET(nasid, i));
+
+ return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
+ }
+
+ printk(KERN_WARNING "unable to establish PIO mapping for at"
+ " hub %d widget %d addr 0x%lx\n",
+ nasid, widget, xtalk_addr);
+ return 0;
+}
+
+
+/*
+ * hub_setup_prb(nasid, prbnum, credits, conveyor)
+ *
+ * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
+ * put it into conveyor belt mode with the specified number of credits.
+ */
+static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
+{
+ union iprb_u prb;
+ int prb_offset;
+
+ /*
+ * Get the current register value.
+ */
+ prb_offset = IIO_IOPRB(prbnum);
+ prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
+
+ /*
+ * Clear out some fields.
+ */
+ prb.iprb_ovflow = 1;
+ prb.iprb_bnakctr = 0;
+ prb.iprb_anakctr = 0;
+
+ /*
+ * Enable or disable fire-and-forget mode.
+ */
+ prb.iprb_ff = force_fire_and_forget ? 1 : 0;
+
+ /*
+ * Set the appropriate number of PIO credits for the widget.
+ */
+ prb.iprb_xtalkctr = credits;
+
+ /*
+ * Store the new value to the register.
+ */
+ REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
+}
+
+/**
+ * hub_set_piomode - set pio mode for a given hub
+ *
+ * @nasid: physical node ID for the hub in question
+ *
+ * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
+ * To do this, we have to make absolutely sure that no PIOs are in progress
+ * so we turn off access to all widgets for the duration of the function.
+ *
+ * XXX - This code should really check what kind of widget we're talking
+ * to. Bridges can only handle three requests, but XG will do more.
+ * How many can crossbow handle to widget 0? We're assuming 1.
+ *
+ * XXX - There is a bug in the crossbow that link reset PIOs do not
+ * return write responses. The easiest solution to this problem is to
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
+ * only affects pio's to xbow registers, which should be rare.
+ **/
+static void hub_set_piomode(nasid_t nasid)
+{
+ u64 ii_iowa;
+ union hubii_wcr_u ii_wcr;
+ unsigned i;
+
+ ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
+
+ ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
+
+ if (ii_wcr.iwcr_dir_con) {
+ /*
+ * Assume a bridge here.
+ */
+ hub_setup_prb(nasid, 0, 3);
+ } else {
+ /*
+ * Assume a crossbow here.
+ */
+ hub_setup_prb(nasid, 0, 1);
+ }
+
+ /*
+ * XXX - Here's where we should take the widget type into
+ * account when assigning credits.
+ */
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
+ hub_setup_prb(nasid, i, 3);
+
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
+}
+
+/*
+ * hub_pio_init - PIO-related hub initialization
+ *
+ * @hub: hubinfo structure for our hub
+ */
+void hub_pio_init(nasid_t nasid)
+{
+ unsigned i;
+
+ /* initialize big window piomaps for this hub */
+ bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
+ IIO_ITTE_DISABLE(nasid, i);
+
+ hub_set_piomode(nasid);
+}
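A hypothetical caller of hub_pio_map() could look like the sketch below; the widget number, crosstalk offset and size are made-up values for illustration and are not taken from this patch:

	unsigned long va;
	u64 val;

	va = hub_pio_map(nasid, /* widget */ 8, /* xtalk_addr */ 0, 0x10000);
	if (va)			/* 0 means the mapping could not be set up */
		val = __raw_readq((void __iomem *)va);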
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
new file mode 100644
index 000000000..a4daf8ccd
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/cpumask.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/sgialib.h>
+#include <asm/time.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/mipsregs.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/mapped_kernel.h>
+
+#include "ip27-common.h"
+
+#define CPU_NONE (cpuid_t)-1
+
+static DECLARE_BITMAP(hub_init_mask, MAX_NUMNODES);
+nasid_t master_nasid = INVALID_NASID;
+
+struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
+EXPORT_SYMBOL_GPL(sn_cpu_info);
+
+static void per_hub_init(nasid_t nasid)
+{
+ struct hub_data *hub = hub_data(nasid);
+
+ cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
+
+ if (test_and_set_bit(nasid, hub_init_mask))
+ return;
+ /*
+ * Set CRB timeout at 5ms, (< PI timeout of 10ms)
+ */
+ REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
+ REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+ hub_rtc_init(nasid);
+
+ if (nasid) {
+ /* copy exception handlers from first node to current node */
+ memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+ (void *)CKSEG0, 0x200);
+ __flush_cache_all();
+ /* switch to node local exception handlers */
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
+ }
+}
+
+void per_cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ nasid_t nasid = get_nasid();
+
+ clear_c0_status(ST0_IM);
+
+ per_hub_init(nasid);
+
+ pr_info("CPU %d clock is %dMHz.\n", cpu, sn_cpu_info[cpu].p_speed);
+
+ install_ipi();
+
+ /* Install our NMI handler if symmon hasn't installed one. */
+ install_cpu_nmi_handler(cputoslice(cpu));
+
+ enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE);
+}
+
+void __init plat_mem_setup(void)
+{
+ u64 p, e, n_mode;
+ nasid_t nid;
+
+ register_smp_ops(&ip27_smp_ops);
+
+ ip27_reboot_setup();
+
+ /*
+ * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
+ */
+ nid = get_nasid();
+ printk("IP27: Running on node %d.\n", nid);
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
+ printk("Node %d has %s primary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
+ printk("Node %d has %s secondary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ /*
+ * Try to catch kernel misconfigurations and give the user an
+ * indication of which option to select.
+ */
+ n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
+ printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
+#ifdef CONFIG_SGI_SN_N_MODE
+ if (!n_mode)
+ panic("Kernel compiled for M mode.");
+#else
+ if (n_mode)
+ panic("Kernel compiled for N mode.");
+#endif
+
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
+ set_io_port_base(IO_BASE);
+}
+
+const char *get_system_type(void)
+{
+ return "SGI Origin";
+}
+
+void __init prom_init(void)
+{
+ prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
+ prom_meminit();
+}
+
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
new file mode 100644
index 000000000..a0dd3bd2b
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
+ *
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999 - 2001 Kanoj Sarcar
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/irq_cpu.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/irq_alloc.h>
+
+struct hub_irq_data {
+ u64 *irq_mask[2];
+ cpuid_t cpu;
+};
+
+static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
+
+static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
+
+static inline int alloc_level(void)
+{
+ int level;
+
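+ /*
+ * find_first_zero_bit() and test_and_set_bit() are not atomic as a
+ * pair, so retry if another CPU claimed the same level in between.
+ */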
+again:
+ level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
+ if (level >= IP27_HUB_IRQ_COUNT)
+ return -ENOSPC;
+
+ if (test_and_set_bit(level, hub_irq_map))
+ goto again;
+
+ return level;
+}
+
+static void enable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ set_bit(d->hwirq, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static void disable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
+{
+ nasid_t nasid;
+ int cpu;
+
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_any(cpu_online_mask);
+
+ nasid = cpu_to_node(cpu);
+ hd->cpu = cpu;
+ if (!cputoslice(cpu)) {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
+ } else {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
+ }
+}
+
+static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+
+ if (!hd)
+ return -EINVAL;
+
+ if (irqd_is_started(d))
+ disable_hub_irq(d);
+
+ setup_hub_mask(hd, mask);
+
+ if (irqd_is_started(d))
+ enable_hub_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
+
+ return 0;
+}
+
+static struct irq_chip hub_irq_type = {
+ .name = "HUB",
+ .irq_mask = disable_hub_irq,
+ .irq_unmask = enable_hub_irq,
+ .irq_set_affinity = set_affinity_hub_irq,
+};
+
+static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct hub_irq_data *hd;
+ struct hub_data *hub;
+ struct irq_desc *desc;
+ int swlevel;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ swlevel = alloc_level();
+ if (unlikely(swlevel < 0)) {
+ kfree(hd);
+ return -EAGAIN;
+ }
+ irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
+ handle_level_irq, NULL, NULL);
+
+ /* use CPU connected to nearest hub */
+ hub = hub_data(info->nasid);
+ setup_hub_mask(hd, &hub->h_cpus);
+ info->nasid = cpu_to_node(hd->cpu);
+
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+
+ desc = irq_to_desc(virq);
+ desc->irq_common_data.node = info->nasid;
+ cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
+
+ return 0;
+}
+
+static void hub_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irqd;
+
+ if (nr_irqs > 1)
+ return;
+
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd && irqd->chip_data)
+ kfree(irqd->chip_data);
+}
+
+static const struct irq_domain_ops hub_domain_ops = {
+ .alloc = hub_domain_alloc,
+ .free = hub_domain_free,
+};
+
+/*
+ * This code is unnecessarily complex, because we do
+ * intr enabling. Basically, once we grab the set of intrs we need
+ * to service, we must mask _all_ these interrupts; firstly, to make
+ * sure the same intr does not intr again, causing recursion that
+ * can lead to stack overflow. Secondly, we can not just mask the
+ * one intr we are do_IRQing, because the non-masked intrs in the
+ * first set might intr again, causing multiple servicings of the
+ * same intr. This effect is mostly seen for intercpu intrs.
+ * Kanoj 05.13.00
+ */
+
+static void ip27_do_irq_mask0(struct irq_desc *desc)
+{
+ cpuid_t cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
+ u64 pend0;
+ int ret;
+
+ /* copied from Irix intpend0() */
+ pend0 = LOCAL_HUB_L(PI_INT_PEND0);
+
+ pend0 &= mask[0]; /* Pick intrs we should look at */
+ if (!pend0)
+ return;
+
+#ifdef CONFIG_SMP
+ if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+ scheduler_ipi();
+ } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+ scheduler_ipi();
+ } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
+ generic_smp_call_function_interrupt();
+ } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
+ generic_smp_call_function_interrupt();
+ } else
+#endif
+ {
+ domain = irq_desc_get_handler_data(desc);
+ ret = generic_handle_domain_irq(domain, __ffs(pend0));
+ if (ret)
+ spurious_interrupt();
+ }
+
+ LOCAL_HUB_L(PI_INT_PEND0);
+}
+
+static void ip27_do_irq_mask1(struct irq_desc *desc)
+{
+ cpuid_t cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
+ u64 pend1;
+ int ret;
+
+ /* copied from Irix intpend0() */
+ pend1 = LOCAL_HUB_L(PI_INT_PEND1);
+
+ pend1 &= mask[1]; /* Pick intrs we should look at */
+ if (!pend1)
+ return;
+
+ domain = irq_desc_get_handler_data(desc);
+ ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
+ if (ret)
+ spurious_interrupt();
+
+ LOCAL_HUB_L(PI_INT_PEND1);
+}
+
+void install_ipi(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ int slice = LOCAL_HUB_L(PI_CPU_NUM);
+ int resched, call;
+
+ resched = CPU_RESCHED_A_IRQ + slice;
+ set_bit(resched, mask);
+ LOCAL_HUB_CLR_INTR(resched);
+
+ call = CPU_CALL_A_IRQ + slice;
+ set_bit(call, mask);
+ LOCAL_HUB_CLR_INTR(call);
+
+ if (slice == 0) {
+ LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
+ } else {
+ LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
+ }
+}
+
+void __init arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ int i;
+
+ mips_cpu_irq_init();
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= CPU_CALL_B_IRQ; i++)
+ set_bit(i, hub_irq_map);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+ set_bit(i, hub_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HUB");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
+ &hub_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
+
+ irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
+ domain);
+ irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
+ domain);
+}
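Bus code (such as the xtalk bridge driver) allocates hub interrupts through this domain by passing an irq_alloc_info that names the target nasid. A rough sketch using the generic irq_domain_alloc_irqs() helper; the handler, device pointer and name below are hypothetical and not part of this patch:

	struct irq_alloc_info info = { };
	int virq;

	info.nasid = nasid;		/* hub that should receive the interrupt */
	virq = irq_domain_alloc_irqs(irq_get_default_host(), 1, nasid, &info);
	if (virq > 0)
		request_irq(virq, my_handler, 0, "my-widget", my_dev);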
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
new file mode 100644
index 000000000..81a1646e6
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+
+klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
+{
+ int index, j;
+
+ if (kli == (klinfo_t *)NULL) {
+ index = 0;
+ } else {
+ for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
+ if (kli == KLCF_COMP(brd, j))
+ break;
+ index = j;
+ if (index == KLCF_NUM_COMPS(brd)) {
+ printk("find_component: Bad pointer: 0x%p\n", kli);
+ return (klinfo_t *)NULL;
+ }
+ index++; /* next component */
+ }
+
+ for (; index < KLCF_NUM_COMPS(brd); index++) {
+ kli = KLCF_COMP(brd, index);
+ if (KLCF_COMP_TYPE(kli) == struct_type)
+ return kli;
+ }
+
+ /* Didn't find it. */
+ return (klinfo_t *)NULL;
+}
+
+klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
+{
+ return find_component(brd, (klinfo_t *)NULL, struct_type);
+}
+
+lboard_t *find_lboard(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (start->brd_type == brd_type)
+ return start;
+ start = KLCF_NEXT(start);
+ }
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
+
+lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
+ return start;
+ start = KLCF_NEXT(start);
+ }
+
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
new file mode 100644
index 000000000..abd7a84df
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
+ * Copyright 2000 - 2001 Silicon Graphics, Inc.
+ * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com)
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/kernel.h>
+#include <linux/nodemask.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/mapped_kernel.h>
+
+#include "ip27-common.h"
+
+static nodemask_t ktext_repmask;
+
+/*
+ * XXX - This needs to be much smarter about where it puts copies of the
+ * kernel. For example, we should never put a copy on a headless node,
+ * and we should respect the topology of the machine.
+ */
+void __init setup_replication_mask(void)
+{
+ /* Set only the master cnode's bit. The master cnode is always 0. */
+ nodes_clear(ktext_repmask);
+ node_set(0, ktext_repmask);
+
+#ifdef CONFIG_REPLICATE_KTEXT
+#ifndef CONFIG_MAPPED_KERNEL
+#error Kernel text replication requires mapped kernel support. No calias support.
+#endif
+ {
+ nasid_t nasid;
+
+ for_each_online_node(nasid) {
+ if (nasid == 0)
+ continue;
+ /* Advertise that we have a copy of the kernel */
+ node_set(nasid, ktext_repmask);
+ }
+ }
+#endif
+ /* Set up a GDA pointer to the replication mask. */
+ GDA->g_ktext_repmask = &ktext_repmask;
+}
+
+
+static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
+{
+ kern_vars_t *kvp;
+
+ kvp = &hub_data(client_nasid)->kern_vars;
+
+ KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
+
+ kvp->kv_magic = KV_MAGIC;
+ kvp->kv_ro_nasid = server_nasid;
+ kvp->kv_rw_nasid = master_nasid;
+ kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
+ kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
+ printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid);
+}
+
+/* XXX - When the BTE works, we should use it instead of this. */
+static __init void copy_kernel(nasid_t dest_nasid)
+{
+ unsigned long dest_kern_start, source_start, source_end, kern_size;
+
+ source_start = (unsigned long) _stext;
+ source_end = (unsigned long) _etext;
+ kern_size = source_end - source_start;
+
+ dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start),
+ dest_nasid);
+ memcpy((void *)dest_kern_start, (void *)source_start, kern_size);
+}
+
+void __init replicate_kernel_text(void)
+{
+ nasid_t client_nasid;
+ nasid_t server_nasid;
+
+ server_nasid = master_nasid;
+
+ /* Record where the master node should get its kernel text */
+ set_ktext_source(master_nasid, master_nasid);
+
+ for_each_online_node(client_nasid) {
+ if (client_nasid == 0)
+ continue;
+
+ /* Check if this node should get a copy of the kernel */
+ if (node_isset(client_nasid, ktext_repmask)) {
+ server_nasid = client_nasid;
+ copy_kernel(server_nasid);
+ }
+
+ /* Record where this node should get its kernel text */
+ set_ktext_source(client_nasid, server_nasid);
+ }
+}
+
+/*
+ * Return pfn of first free page of memory on a node. PROM may allocate
+ * data structures on the first couple of pages of the first slot of each
+ * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
+ */
+unsigned long node_getfirstfree(nasid_t nasid)
+{
+ unsigned long loadbase = REP_BASE;
+ unsigned long offset;
+
+#ifdef CONFIG_MAPPED_KERNEL
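+ /* skip the 16 MB (0x01000000) data offset used for mapped kernels;
+ * this matches dataoffset-$(CONFIG_SGI_IP27) in the Platform file */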
+ loadbase += 16777216;
+#endif
+ offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
+ if ((nasid == 0) || (node_isset(nasid, ktext_repmask)))
+ return TO_NODE(nasid, offset) >> PAGE_SHIFT;
+ else
+ return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
+}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
new file mode 100644
index 000000000..f79c48393
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -0,0 +1,434 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2000 by Silicon Graphics, Inc.
+ * Copyright (C) 2004 by Christoph Hellwig
+ *
+ * On SGI IP27 the ARC memory configuration data is completely bogus but
+ * alternative, easier-to-use mechanisms are available.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/pfn.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+
+#include "ip27-common.h"
+
+#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
+
+struct node_data *__node_data[MAX_NUMNODES];
+
+EXPORT_SYMBOL(__node_data);
+
+static u64 gen_region_mask(void)
+{
+ int region_shift;
+ u64 region_mask;
+ nasid_t nasid;
+
+ region_shift = get_region_shift();
+ region_mask = 0;
+ for_each_online_node(nasid)
+ region_mask |= BIT_ULL(nasid >> region_shift);
+
+ return region_mask;
+}
+
+#define rou_rflag rou_flags
+
+static int router_distance;
+
+static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
+{
+ klrou_t *router;
+ lboard_t *brd;
+ int port;
+
+ if (router_a->rou_rflag == 1)
+ return;
+
+ if (depth >= router_distance)
+ return;
+
+ router_a->rou_rflag = 1;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router_a->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router_a->rou_port[port].port_nasid,
+ router_a->rou_port[port].port_offset);
+
+ if (brd->brd_type == KLTYPE_ROUTER) {
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ if (router == router_b) {
+ if (depth < router_distance)
+ router_distance = depth;
+ }
+ else
+ router_recurse(router, router_b, depth + 1);
+ }
+ }
+
+ router_a->rou_rflag = 0;
+}
+
+unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
+
+static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
+{
+ klrou_t *router, *router_a = NULL, *router_b = NULL;
+ lboard_t *brd, *dest_brd;
+ nasid_t nasid;
+ int port;
+
+ /* Figure out which routers nodes in question are connected to */
+ for_each_online_node(nasid) {
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ router->rou_rflag = 0;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27) {
+ if (dest_brd->brd_nasid == nasid_a)
+ router_a = router;
+ if (dest_brd->brd_nasid == nasid_b)
+ router_b = router;
+ }
+ }
+
+ } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
+ }
+
+ if (nasid_a == nasid_b)
+ return LOCAL_DISTANCE;
+
+ if (router_a == router_b)
+ return LOCAL_DISTANCE + 1;
+
+ if (router_a == NULL) {
+ pr_info("node_distance: router_a NULL\n");
+ return 255;
+ }
+ if (router_b == NULL) {
+ pr_info("node_distance: router_b NULL\n");
+ return 255;
+ }
+
+ router_distance = 100;
+ router_recurse(router_a, router_b, 2);
+
+ return LOCAL_DISTANCE + router_distance;
+}
+
+static void __init init_topology_matrix(void)
+{
+ nasid_t row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++)
+ __node_distances[row][col] = -1;
+
+ for_each_online_node(row) {
+ for_each_online_node(col) {
+ __node_distances[row][col] =
+ compute_node_distance(row, col);
+ }
+ }
+}
+
+static void __init dump_topology(void)
+{
+ nasid_t nasid;
+ lboard_t *brd, *dest_brd;
+ int port;
+ int router_num = 0;
+ klrou_t *router;
+ nasid_t row, col;
+
+ pr_info("************** Topology ********************\n");
+
+ pr_info(" ");
+ for_each_online_node(col)
+ pr_cont("%02d ", col);
+ pr_cont("\n");
+ for_each_online_node(row) {
+ pr_info("%02d ", row);
+ for_each_online_node(col)
+ pr_cont("%2d ", node_distance(row, col));
+ pr_cont("\n");
+ }
+
+ for_each_online_node(nasid) {
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+ pr_cont("Router %d:", router_num);
+ router_num++;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27)
+ pr_cont(" %d", dest_brd->brd_nasid);
+ if (dest_brd->brd_type == KLTYPE_ROUTER)
+ pr_cont(" r");
+ }
+ pr_cont("\n");
+
+ } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
+ }
+}
+
+static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
+{
+ return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+}
+
+static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
+{
+ lboard_t *brd;
+ klmembnk_t *banks;
+ unsigned long size;
+
+ /* Find the node board */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+ if (!brd)
+ return 0;
+
+ /* Get the memory bank structure */
+ banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
+ if (!banks)
+ return 0;
+
+ /* Size in _Megabytes_ */
+ size = (unsigned long)banks->membnk_bnksz[slot/4];
+
+ /* hack for 128 dimm banks */
+ if (size <= 128) {
+ if (slot % 4 == 0) {
+ size <<= 20; /* size in bytes */
+ return size >> PAGE_SHIFT;
+ } else
+ return 0;
+ } else {
+ size /= 4;
+ size <<= 20;
+ return size >> PAGE_SHIFT;
+ }
+}
+
+static void __init mlreset(void)
+{
+ u64 region_mask;
+ nasid_t nasid;
+
+ master_nasid = get_nasid();
+
+ /*
+ * Probe for all CPUs - this creates the cpumask and sets up the
+ * mapping tables. We need to do this as early as possible.
+ */
+#ifdef CONFIG_SMP
+ cpu_node_probe();
+#endif
+
+ init_topology_matrix();
+ dump_topology();
+
+ region_mask = gen_region_mask();
+
+ setup_replication_mask();
+
+ /*
+ * Set all nodes' calias sizes to 8k
+ */
+ for_each_online_node(nasid) {
+ /*
+ * Always have node 0 in the region mask, otherwise
+ * CALIAS accesses get exceptions since the hub
+ * thinks it is a node 0 address.
+ */
+ REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
+
+#ifdef LATER
+ /*
+ * Set up all hubs to have a big window pointing at
+ * widget 0. Memory mode, widget 0, offset 0
+ */
+ REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
+ ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
+ (0 << IIO_ITTE_WIDGET_SHIFT)));
+#endif
+ }
+}
+
+static void __init szmem(void)
+{
+ unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
+ int slot;
+ nasid_t node;
+
+ for_each_online_node(node) {
+ nodebytes = 0;
+ for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
+ slot_psize = slot_psize_compute(node, slot);
+ if (slot == 0)
+ slot0sz = slot_psize;
+ /*
+ * We need to refine the hack when we have replicated
+ * kernel text.
+ */
+ nodebytes += (1LL << SLOT_SHIFT);
+
+ if (!slot_psize)
+ continue;
+
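+ /*
+ * Ignore the rest of the node's memory once the struct page array
+ * needed to cover it would no longer fit into slot 0.
+ */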
+ if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
+ (slot0sz << PAGE_SHIFT)) {
+ pr_info("Ignoring slot %d onwards on node %d\n",
+ slot, node);
+ slot = MAX_MEM_SLOTS;
+ continue;
+ }
+ memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+ PFN_PHYS(slot_psize), node,
+ MEMBLOCK_NONE);
+ }
+ }
+}
+
+static void __init node_mem_init(nasid_t node)
+{
+ unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+ unsigned long slot_freepfn = node_getfirstfree(node);
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+ /*
+ * Allocate the node data structures on the node first.
+ */
+ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
+ memset(__node_data[node], 0, PAGE_SIZE);
+
+ NODE_DATA(node)->node_start_pfn = start_pfn;
+ NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
+
+ cpumask_clear(&hub_data(node)->h_cpus);
+
+ slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
+ sizeof(struct hub_data));
+
+ memblock_reserve(slot_firstpfn << PAGE_SHIFT,
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
+}
+
+/*
+ * A node with nothing. We use it to avoid any special casing in
+ * cpumask_of_node
+ */
+static struct node_data null_node = {
+ .hub = {
+ .h_cpus = CPU_MASK_NONE
+ }
+};
+
+/*
+ * Currently, the intranode memory hole support assumes that each slot
+ * contains at least 32 MBytes of memory. We assume all bootmem data
+ * fits on the first slot.
+ */
+void __init prom_meminit(void)
+{
+ nasid_t node;
+
+ mlreset();
+ szmem();
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ if (node_online(node)) {
+ node_mem_init(node);
+ continue;
+ }
+ __node_data[node] = &null_node;
+ }
+}
+
+extern void setup_zero_pages(void);
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ pagetable_init();
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ memblock_free_all();
+ setup_zero_pages(); /* This comes from node 0 */
+}
+
+pg_data_t * __init arch_alloc_nodedata(int nid)
+{
+ return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
+}
+
+void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ __node_data[nid] = (struct node_data *)pgdat;
+}
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
new file mode 100644
index 000000000..84889b57d
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/nmi.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+
+#if 0
+#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
+#else
+#define NODE_NUM_CPUS(n) CPUS_PER_NODE
+#endif
+
+#define SEND_NMI(_nasid, _slice) \
+ REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+
+typedef unsigned long machreg_t;
+
+static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+/*
+ * Let's see what else we need to do here. Set up sp, gp?
+ */
+void nmi_dump(void)
+{
+ void cont_nmi_dump(void);
+
+ cont_nmi_dump();
+}
+
+void install_cpu_nmi_handler(int slice)
+{
+ nmi_t *nmi_addr;
+
+ nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
+ if (nmi_addr->call_addr)
+ return;
+ nmi_addr->magic = NMI_MAGIC;
+ nmi_addr->call_addr = (void *)nmi_dump;
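+ /* call_addr_c holds the one's complement of call_addr; the PROM is
+ * assumed to use it as a consistency check before jumping there */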
+ nmi_addr->call_addr_c =
+ (void *)(~((unsigned long)(nmi_addr->call_addr)));
+ nmi_addr->call_parm = 0;
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+
+void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+{
+ struct reg_struct *nr;
+ int i;
+
+ /* Get the pointer to the current cpu's register set. */
+ nr = (struct reg_struct *)
+ (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
+ slice * IP27_NMI_KREGS_CPU_SIZE);
+
+ pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ pr_emerg("$%2d :", i);
+ pr_cont(" %016lx", nr->gpr[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+ pr_emerg("Hi : (value lost)\n");
+ pr_emerg("Lo : (value lost)\n");
+
+ /*
+ * Saved cp0 registers
+ */
+ pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc);
+ pr_emerg("%s\n", print_tainted());
+ pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
+ pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
+ pr_emerg("Status: %08lx ", nr->sr);
+
+ if (nr->sr & ST0_KX)
+ pr_cont("KX ");
+ if (nr->sr & ST0_SX)
+ pr_cont("SX ");
+ if (nr->sr & ST0_UX)
+ pr_cont("UX ");
+
+ switch (nr->sr & ST0_KSU) {
+ case KSU_USER:
+ pr_cont("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ pr_cont("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ pr_cont("KERNEL ");
+ break;
+ default:
+ pr_cont("BAD_MODE ");
+ break;
+ }
+
+ if (nr->sr & ST0_ERL)
+ pr_cont("ERL ");
+ if (nr->sr & ST0_EXL)
+ pr_cont("EXL ");
+ if (nr->sr & ST0_IE)
+ pr_cont("IE ");
+ pr_cont("\n");
+
+ pr_emerg("Cause : %08lx\n", nr->cause);
+ pr_emerg("PrId : %08x\n", read_c0_prid());
+ pr_emerg("BadVA : %016lx\n", nr->badva);
+ pr_emerg("CErr : %016lx\n", nr->cache_err);
+ pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);
+
+ pr_emerg("\n");
+}
+
+void nmi_dump_hub_irq(nasid_t nasid, int slice)
+{
+ u64 mask0, mask1, pend0, pend1;
+
+ if (slice == 0) { /* Slice A */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
+ } else { /* Slice B */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
+ }
+
+ pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
+ pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
+
+ pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
+ pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
+ pr_emerg("\n\n");
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+void nmi_node_eframe_save(nasid_t nasid)
+{
+ int slice;
+
+ if (nasid == INVALID_NASID)
+ return;
+
+ /* Save the registers into eframe for each cpu */
+ for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
+ nmi_cpu_eframe_save(nasid, slice);
+ nmi_dump_hub_irq(nasid, slice);
+ }
+}
+
+/*
+ * Save the nmi cpu registers for all cpus in the system.
+ */
+void
+nmi_eframes_save(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid)
+ nmi_node_eframe_save(nasid);
+}
+
+void
+cont_nmi_dump(void)
+{
+#ifndef REAL_NMI_SIGNAL
+ static atomic_t nmied_cpus = ATOMIC_INIT(0);
+
+ atomic_inc(&nmied_cpus);
+#endif
+ /*
+ * Only allow 1 cpu to proceed
+ */
+ arch_spin_lock(&nmi_lock);
+
+#ifdef REAL_NMI_SIGNAL
+ /*
+ * Wait up to 15 seconds for the other cpus to respond to the NMI.
+ * If a cpu has not responded after 10 sec, send it 1 additional NMI.
+ * This is for 2 reasons:
+ * - sometimes an MMSC fails to NMI all cpus.
+ * - on 512p SN0 system, the MMSC will only send NMIs to
+ * half the cpus. Unfortunately, we don't know which cpus may be
+ * NMIed - it depends on how the site chooses to configure.
+ *
+ * Note: it has been measured that it takes the MMSC up to 2.3 secs to
+ * send NMIs to all cpus on a 256p system.
+ */
+ for (i=0; i < 1500; i++) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0)
+ break;
+ if (node == MAX_NUMNODES)
+ break;
+ if (i == 1000) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0) {
+ cpu = cpumask_first(cpumask_of_node(node));
+ for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
+ CPUMASK_SETB(nmied_cpus, cpu);
+ /*
+ * cputonasid, cputoslice
+ * needs kernel cpuid
+ */
+ SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
+ }
+ }
+
+ }
+ udelay(10000);
+ }
+#else
+ while (atomic_read(&nmied_cpus) != num_online_cpus());
+#endif
+
+ /*
+ * Save the nmi cpu registers for all cpu in the eframe format.
+ */
+ nmi_eframes_save();
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+}
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
new file mode 100644
index 000000000..5ac5ad638
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -0,0 +1,81 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Reset an IP27.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/smp.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <linux/pm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/sgialib.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+
+#include "ip27-common.h"
+
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
+
+#define noreturn while(1); /* Silence gcc. */
+
+/* XXX How to pass the reboot command to the firmware??? */
+static void ip27_machine_restart(char *command)
+{
+#if 0
+ int i;
+#endif
+
+ printk("Reboot started from CPU %d\n", smp_processor_id());
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+#if 0
+ for_each_online_node(i)
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_REBOOT);
+#else
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+#endif
+ noreturn;
+}
+
+static void ip27_machine_halt(void)
+{
+ int i;
+
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ for_each_online_node(i)
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_RESTART);
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+ noreturn;
+}
+
+static void ip27_machine_power_off(void)
+{
+ /* To do ... */
+ noreturn;
+}
+
+void ip27_reboot_setup(void)
+{
+ _machine_restart = ip27_machine_restart;
+ _machine_halt = ip27_machine_halt;
+ pm_power_off = ip27_machine_power_off;
+}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
new file mode 100644
index 000000000..5d2652a1d
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -0,0 +1,189 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/mapped_kernel.h>
+#include <asm/sn/types.h>
+
+#include "ip27-common.h"
+
+static int node_scan_cpus(nasid_t nasid, int highest)
+{
+ static int cpus_found;
+ lboard_t *brd;
+ klcpu_t *acpu;
+ cpuid_t cpuid;
+
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+
+ do {
+ acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
+ while (acpu) {
+ cpuid = acpu->cpu_info.virtid;
+ /* Only let it join in if it's marked enabled */
+ if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
+ (cpus_found != NR_CPUS)) {
+ if (cpuid > highest)
+ highest = cpuid;
+ set_cpu_possible(cpuid, true);
+ cputonasid(cpus_found) = nasid;
+ cputoslice(cpus_found) = acpu->cpu_info.physid;
+ sn_cpu_info[cpus_found].p_speed =
+ acpu->cpu_speed;
+ cpus_found++;
+ }
+ acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
+ KLSTRUCT_CPU);
+ }
+ brd = KLCF_NEXT(brd);
+ if (!brd)
+ break;
+
+ brd = find_lboard(brd, KLTYPE_IP27);
+ } while (brd);
+
+ return highest;
+}
+
+void cpu_node_probe(void)
+{
+ int i, highest = 0;
+ gda_t *gdap = GDA;
+
+ nodes_clear(node_online_map);
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ nasid_t nasid = gdap->g_nasidtable[i];
+ if (nasid == INVALID_NASID)
+ break;
+ node_set_online(nasid);
+ highest = node_scan_cpus(nasid, highest);
+ }
+
+ printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
+}
+
+static __init void intr_clear_all(nasid_t nasid)
+{
+ int i;
+
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
+
+ for (i = 0; i < 128; i++)
+ REMOTE_HUB_CLR_INTR(nasid, i);
+}
+
+static void ip27_send_ipi_single(int destid, unsigned int action)
+{
+ int irq;
+
+ switch (action) {
+ case SMP_RESCHEDULE_YOURSELF:
+ irq = CPU_RESCHED_A_IRQ;
+ break;
+ case SMP_CALL_FUNCTION:
+ irq = CPU_CALL_A_IRQ;
+ break;
+ default:
+ panic("sendintr");
+ }
+
+ irq += cputoslice(destid);
+
+ /*
+ * Set the interrupt bit associated with the CPU we want to
+ * send the interrupt to.
+ */
+ REMOTE_HUB_SEND_INTR(cpu_to_node(destid), irq);
+}
+
+static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ip27_send_ipi_single(i, action);
+}
+
+static void ip27_init_cpu(void)
+{
+ per_cpu_init();
+}
+
+static void ip27_smp_finish(void)
+{
+ hub_rt_clock_event_init();
+ local_irq_enable();
+}
+
+/*
+ * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
+ * set sp to the kernel stack of the newly created idle process, gp to the proc
+ * struct so that current_thread_info() will work.
+ */
+static int ip27_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long gp = (unsigned long)task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+
+ LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
+ (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
+ 0, (void *) sp, (void *) gp);
+ return 0;
+}
+
+static void __init ip27_smp_setup(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid) {
+ if (nasid == 0)
+ continue;
+ intr_clear_all(nasid);
+ }
+
+ replicate_kernel_text();
+
+ /*
+ * The PROM sets up the system so that the boot CPU is always the
+ * first CPU on nasid 0.
+ */
+ cputonasid(0) = 0;
+ cputoslice(0) = LOCAL_HUB_L(PI_CPU_NUM);
+}
+
+static void __init ip27_prepare_cpus(unsigned int max_cpus)
+{
+ /* We already did everything necessary earlier */
+}
+
+const struct plat_smp_ops ip27_smp_ops = {
+ .send_ipi_single = ip27_send_ipi_single,
+ .send_ipi_mask = ip27_send_ipi_mask,
+ .init_secondary = ip27_init_cpu,
+ .smp_finish = ip27_smp_finish,
+ .boot_secondary = ip27_boot_secondary,
+ .smp_setup = ip27_smp_setup,
+ .prepare_cpus = ip27_prepare_cpus,
+ .prepare_boot_cpu = ip27_init_cpu,
+};
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
new file mode 100644
index 000000000..444b5e0e9
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bcd.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched_clock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+
+#include <asm/time.h>
+#include <asm/sgialib.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+
+#include "ip27-common.h"
+
+static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ int slice = cputoslice(cpu);
+ unsigned long cnt;
+
+ cnt = LOCAL_HUB_L(PI_RT_COUNT);
+ cnt += delta;
+ LOCAL_HUB_S(PI_RT_COMPARE_A + PI_COUNT_OFFSET * slice, cnt);
+
+ return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
+static DEFINE_PER_CPU(char [11], hub_rt_name);
+
+static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
+ int slice = cputoslice(cpu);
+
+	/*
+	 * Acknowledge the interrupt by clearing this slice's RT pending bit.
+	 */
+ LOCAL_HUB_S(PI_RT_PEND_A + PI_COUNT_OFFSET * slice, 0);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+struct irqaction hub_rt_irqaction = {
+ .handler = hub_rt_counter_handler,
+ .percpu_dev_id = &hub_rt_clockevent,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "hub-rt",
+};
+
+/*
+ * This is a hack; we really need to figure these values out dynamically.
+ *
+ * Since 800 ns works very well with various HUB frequencies, such as
+ * 360, 380, 390 and 400 MHz, we use an 800 ns RTC cycle time.
+ *
+ * Ralf: which clock rate is used to feed the counter?
+ */
+#define NSEC_PER_CYCLE 800
+#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
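+/* NSEC_PER_SEC / 800 ns = 1,250,000 counter cycles per second */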
+
+void hub_rt_clock_event_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
+ unsigned char *name = per_cpu(hub_rt_name, cpu);
+
+ sprintf(name, "hub-rt %d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, CYCLES_PER_SEC);
+ cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
+ cd->max_delta_ticks = 0xfffffffffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->rating = 200;
+ cd->irq = IP27_RT_TIMER_IRQ;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = rt_next_event;
+ clockevents_register_device(cd);
+
+ enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE);
+}
+
+static void __init hub_rt_clock_event_global_init(void)
+{
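+	/*
+	 * The RT timer interrupt is a per-CPU IRQ; each CPU enables its own
+	 * copy later in hub_rt_clock_event_init().
+	 */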
+ irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq);
+ irq_set_percpu_devid(IP27_RT_TIMER_IRQ);
+ setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction);
+}
+
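+/*
+ * The clocksource and sched_clock below always read node 0's RT counter, so
+ * every CPU sees a single monotonic time base.
+ */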
+static u64 hub_rt_read(struct clocksource *cs)
+{
+ return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
+}
+
+struct clocksource hub_rt_clocksource = {
+ .name = "HUB-RT",
+ .rating = 200,
+ .read = hub_rt_read,
+ .mask = CLOCKSOURCE_MASK(52),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace hub_rt_read_sched_clock(void)
+{
+ return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
+}
+
+static void __init hub_rt_clocksource_init(void)
+{
+ struct clocksource *cs = &hub_rt_clocksource;
+
+ clocksource_register_hz(cs, CYCLES_PER_SEC);
+
+ sched_clock_register(hub_rt_read_sched_clock, 52, CYCLES_PER_SEC);
+}
+
+void __init plat_time_init(void)
+{
+ hub_rt_clocksource_init();
+ hub_rt_clock_event_global_init();
+ hub_rt_clock_event_init();
+}
+
+void hub_rtc_init(nasid_t nasid)
+{
+	/*
+	 * We only need to initialize the current node; if this is not the
+	 * current node, it is a CPU-less node and timeouts will not happen
+	 * there.
+	 */
+ if (get_nasid() == nasid) {
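+		/*
+		 * Enable the RT counter for both CPU slices, disable the
+		 * profiling timers and clear any stale count and pending
+		 * state.
+		 */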
+ LOCAL_HUB_S(PI_RT_EN_A, 1);
+ LOCAL_HUB_S(PI_RT_EN_B, 1);
+ LOCAL_HUB_S(PI_PROF_EN_A, 0);
+ LOCAL_HUB_S(PI_PROF_EN_B, 0);
+ LOCAL_HUB_S(PI_RT_COUNT, 0);
+ LOCAL_HUB_S(PI_RT_PEND_A, 0);
+ LOCAL_HUB_S(PI_RT_PEND_B, 0);
+ }
+}
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
new file mode 100644
index 000000000..5143d1cf8
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ *
+ * Generic XTALK initialization code
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/pci/bridge.h>
+#include <asm/xtalk/xtalk.h>
+
+#define XBOW_WIDGET_PART_NUM 0x0
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
+#define BASE_XBOW_PORT 8 /* Lowest external port */
+
+static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct sgi_w1_platform_data *wd;
+ struct platform_device *pdev_wd;
+ struct platform_device *pdev_bd;
+ struct resource w1_res;
+ unsigned long offset;
+
+ offset = NODE_OFFSET(nasid);
+
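+	/*
+	 * Each bridge widget gets two platform devices: an sgi_w1 one-wire
+	 * master for the bridge's NIC serial-number register and the
+	 * xtalk-bridge PCI host bridge itself.
+	 */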
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ return;
+ }
+
+ snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
+ offset + (widget << SWIN_SIZE_BITS));
+
+ memset(&w1_res, 0, sizeof(w1_res));
+ w1_res.start = offset + (widget << SWIN_SIZE_BITS) +
+ offsetof(struct bridge_regs, b_nic);
+ w1_res.end = w1_res.start + 3;
+ w1_res.flags = IORESOURCE_MEM;
+
+ pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev_wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_wd;
+ }
+ if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add(pdev_wd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ /* platform_device_add_data() duplicates the data */
+ kfree(wd);
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_unregister_pdev_wd;
+ }
+ pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev_bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_bd;
+ }
+
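+	/*
+	 * intr_addr is the Crosstalk address of this hub's PI_INT_PEND_MOD
+	 * register; the bridge writes there to raise PCI interrupts at the
+	 * hub.
+	 */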
+ bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
+ bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
+ bd->nasid = nasid;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->mem.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = offset;
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->io.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = offset;
+
+ if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
+ if (platform_device_add(pdev_bd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
+ /* platform_device_add_data() duplicates the data */
+ kfree(bd);
+ pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
+ return;
+
+err_put_pdev_bd:
+ platform_device_put(pdev_bd);
+err_kfree_bd:
+ kfree(bd);
+err_unregister_pdev_wd:
+ platform_device_unregister(pdev_wd);
+ return;
+err_put_pdev_wd:
+ platform_device_put(pdev_wd);
+err_kfree_wd:
+ kfree(wd);
+ return;
+}
+
+static int probe_one_port(nasid_t nasid, int widget, int masterwid)
+{
+ widgetreg_t widget_id;
+ xwidget_part_num_t partnum;
+
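+	/*
+	 * Read the widget's ID register; only (X)Bridge widgets become PCI
+	 * host bridges, anything else is merely reported.
+	 */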
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ case XBRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(nasid, widget, masterwid);
+ break;
+ default:
+ pr_info("xtalk:n%d/%d unknown widget (0x%x)\n",
+ nasid, widget, partnum);
+ break;
+ }
+
+ return 0;
+}
+
+static int xbow_probe(nasid_t nasid)
+{
+ lboard_t *brd;
+ klxbow_t *xbow_p;
+ unsigned masterwid, i;
+
+	/*
+	 * We found an xbow, so there may be multiple bridges attached;
+	 * probe the xbow's ports.
+	 */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
+ if (!brd)
+ return -ENODEV;
+
+ xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW);
+ if (!xbow_p)
+ return -ENODEV;
+
+	/*
+	 * Okay, here's an xbow. Let's arbitrate and find out if we should
+	 * initialize it: the enabled hub connected at the highest (or, without
+	 * WIDGET_A, the lowest) widget becomes the master.
+	 */
+#ifdef WIDGET_A
+ i = HUB_WIDGET_ID_MAX + 1;
+ do {
+ i--;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#else
+ i = HUB_WIDGET_ID_MIN - 1;
+ do {
+ i++;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#endif
+
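+	/*
+	 * Only the node attached to the chosen master widget initializes this
+	 * xbow; other hubs on the same crossbar back off.
+	 */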
+ masterwid = i;
+ if (nasid != XBOW_PORT_NASID(xbow_p, i))
+ return 1;
+
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++) {
+ if (XBOW_PORT_IS_ENABLED(xbow_p, i) &&
+ XBOW_PORT_TYPE_IO(xbow_p, i))
+ probe_one_port(nasid, i, masterwid);
+ }
+
+ return 0;
+}
+
+static void xtalk_probe_node(nasid_t nasid)
+{
+ volatile u64 hubreg;
+ xwidget_part_num_t partnum;
+ widgetreg_t widget_id;
+
+ hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+
+ /* check whether the link is up */
+ if (!(hubreg & IIO_LLP_CSR_IS_UP))
+ return;
+
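+	/*
+	 * Widget 0 is whatever sits at the far end of this node's XTalk link:
+	 * either a bridge wired directly to the hub, or an (x)xbow crossbar
+	 * fanning out to more widgets.
+	 */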
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(nasid, 0x8, 0xa);
+ break;
+ case XBOW_WIDGET_PART_NUM:
+ case XXBOW_WIDGET_PART_NUM:
+ pr_info("xtalk:n%d/0 xbow widget\n", nasid);
+ xbow_probe(nasid);
+ break;
+ default:
+ pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
+ break;
+ }
+}
+
+static int __init xtalk_init(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid)
+ xtalk_probe_node(nasid);
+
+ return 0;
+}
+arch_initcall(xtalk_init);