author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit:    76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree:      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/ia64/sn/kernel/sn2
parent:    Initial commit. (diff)
download:  linux-76cb841cb886eef6b3bee341a2266c76578724ad.tar.xz
           linux-76cb841cb886eef6b3bee341a2266c76578724ad.zip
Adding upstream version 4.19.249. (upstream/4.19.249, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile            |   15
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c             |   41
-rw-r--r--  arch/ia64/sn/kernel/sn2/io.c                |  101
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c     |  207
-rw-r--r--  arch/ia64/sn/kernel/sn2/ptc_deadlock.S      |   92
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c           |  584
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c         | 1004
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c        |   69
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c             |   61
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c   |   60
10 files changed, 2234 insertions, 0 deletions
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
new file mode 100644
index 000000000..3d09108d4
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -0,0 +1,15 @@
+# arch/ia64/sn/kernel/sn2/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
+#
+# sn2 specific kernel files
+#
+
+ccflags-y := -Iarch/ia64/sn/include
+
+obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
+ prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
new file mode 100644
index 000000000..2862cb330
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -0,0 +1,41 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+#include <linux/module.h>
+#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
+
+/**
+ * sn_flush_all_caches - flush a range of addresses from all caches (incl. L4)
+ * @flush_addr: identity mapped region 7 address to start flushing
+ * @bytes: number of bytes to flush
+ *
+ * Flush a range of addresses from all caches including L4.
+ * All addresses fully or partially contained within
+ * @flush_addr to @flush_addr + @bytes are flushed
+ * from all caches.
+ */
+void
+sn_flush_all_caches(long flush_addr, long bytes)
+{
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
+ /*
+ * The last call may have returned before the caches
+ * were actually flushed, so we call it again to make
+ * sure.
+ */
+ flush_icache_range(addr, addr + bytes);
+ mb();
+}
+EXPORT_SYMBOL(sn_flush_all_caches);
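+/*
+ * Usage sketch (illustrative only, with a hypothetical caller): flushing
+ * a kernel buffer before a non-coherent agent consumes it:
+ *
+ *	void *buf = kmalloc(len, GFP_KERNEL);	(identity mapped, region 7)
+ *	... fill buf ...
+ *	sn_flush_all_caches((long)buf, len);
+ *
+ * On SHub1 an uncached alias passed in is first rewritten to its cached
+ * RGN_KERNEL equivalent, as done above.
+ */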
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
new file mode 100644
index 000000000..a12c0586d
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * The generic kernel requires function pointers to these routines, so
+ * we wrap the inlines from asm/sn/io.h here.
+ */
+
+#include <asm/sn/io.h>
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __sn_inb
+#undef __sn_inw
+#undef __sn_inl
+#undef __sn_outb
+#undef __sn_outw
+#undef __sn_outl
+#undef __sn_readb
+#undef __sn_readw
+#undef __sn_readl
+#undef __sn_readq
+#undef __sn_readb_relaxed
+#undef __sn_readw_relaxed
+#undef __sn_readl_relaxed
+#undef __sn_readq_relaxed
+
+unsigned int __sn_inb(unsigned long port)
+{
+ return ___sn_inb(port);
+}
+
+unsigned int __sn_inw(unsigned long port)
+{
+ return ___sn_inw(port);
+}
+
+unsigned int __sn_inl(unsigned long port)
+{
+ return ___sn_inl(port);
+}
+
+void __sn_outb(unsigned char val, unsigned long port)
+{
+ ___sn_outb(val, port);
+}
+
+void __sn_outw(unsigned short val, unsigned long port)
+{
+ ___sn_outw(val, port);
+}
+
+void __sn_outl(unsigned int val, unsigned long port)
+{
+ ___sn_outl(val, port);
+}
+
+unsigned char __sn_readb(void __iomem *addr)
+{
+ return ___sn_readb(addr);
+}
+
+unsigned short __sn_readw(void __iomem *addr)
+{
+ return ___sn_readw(addr);
+}
+
+unsigned int __sn_readl(void __iomem *addr)
+{
+ return ___sn_readl(addr);
+}
+
+unsigned long __sn_readq(void __iomem *addr)
+{
+ return ___sn_readq(addr);
+}
+
+unsigned char __sn_readb_relaxed(void __iomem *addr)
+{
+ return ___sn_readb_relaxed(addr);
+}
+
+unsigned short __sn_readw_relaxed(void __iomem *addr)
+{
+ return ___sn_readw_relaxed(addr);
+}
+
+unsigned int __sn_readl_relaxed(void __iomem *addr)
+{
+ return ___sn_readl_relaxed(addr);
+}
+
+unsigned long __sn_readq_relaxed(void __iomem *addr)
+{
+ return ___sn_readq_relaxed(addr);
+}
+
+#endif
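+/*
+ * Why these wrappers exist, as a sketch (hypothetical table, not the
+ * kernel's actual machvec definition): a generic kernel dispatches I/O
+ * accessors through per-platform function pointers, and only a real
+ * out-of-line function has an address to store there:
+ *
+ *	struct io_ops {
+ *		unsigned int (*inb)(unsigned long port);
+ *		unsigned char (*readb)(void __iomem *addr);
+ *	};
+ *	static const struct io_ops sn2_io_ops = {
+ *		.inb	= __sn_inb,
+ *		.readb	= __sn_readb,
+ *	};
+ */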
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
new file mode 100644
index 000000000..e15457bf2
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -0,0 +1,207 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Module to export the system's Firmware Interface Tables, including
+ * PROM revision numbers and banners, in /proc
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/nodemask.h>
+#include <asm/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/addrs.h>
+
+MODULE_DESCRIPTION("PROM version reporting for /proc");
+MODULE_AUTHOR("Chad Talbott");
+MODULE_LICENSE("GPL");
+
+/* Standard Intel FIT entry types */
+#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
+#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
+/* Entries 0x02 through 0x0D reserved by Intel */
+#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
+#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
+#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
+#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
+/* OEM-defined entries range from 0x10 to 0x7E. */
+#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
+#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
+#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
+#define FIT_ENTRY_EFI 0x1F /* EFI entry */
+#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
+#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
+
+#define FIT_MAJOR_SHIFT (32 + 8)
+#define FIT_MAJOR_MASK ((1 << 8) - 1)
+#define FIT_MINOR_SHIFT 32
+#define FIT_MINOR_MASK ((1 << 8) - 1)
+
+#define FIT_MAJOR(q) \
+ ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
+#define FIT_MINOR(q) \
+ ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
+
+#define FIT_TYPE_SHIFT (32 + 16)
+#define FIT_TYPE_MASK ((1 << 7) - 1)
+
+#define FIT_TYPE(q) \
+ ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
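+
+/*
+ * Worked example (illustrative, made-up value): for
+ * q = fentry[1] = 0x0001010203000080UL,
+ *	FIT_TYPE(q)  = (q >> 48) & 0x7f = 0x01  -> "PAL_B"
+ *	FIT_MAJOR(q) = (q >> 40) & 0xff = 0x02
+ *	FIT_MINOR(q) = (q >> 32) & 0xff = 0x03
+ * and the low 24 bits give the entry size in 16-byte units:
+ *	(q & 0xffffff) * 16 = 0x80 * 16 = 2048 bytes.
+ */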
+
+struct fit_type_map_t {
+ unsigned char type;
+ const char *name;
+};
+
+static const struct fit_type_map_t fit_entry_types[] = {
+ {FIT_ENTRY_FIT_HEADER, "FIT Header"},
+ {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
+ {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
+ {FIT_ENTRY_PAL_A, "PAL_A"},
+ {FIT_ENTRY_PAL_B, "PAL_B"},
+ {FIT_ENTRY_SAL_A, "SAL_A"},
+ {FIT_ENTRY_SAL_B, "SAL_B"},
+ {FIT_ENTRY_SALRUNTIME, "SAL runtime"},
+ {FIT_ENTRY_EFI, "EFI"},
+ {FIT_ENTRY_VMLINUX, "Embedded Linux"},
+ {FIT_ENTRY_FPSWA, "Embedded FPSWA"},
+ {FIT_ENTRY_UNUSED, "Unused"},
+ {0xff, "Error"},
+};
+
+static const char *fit_type_name(unsigned char type)
+{
+ struct fit_type_map_t const *mapp;
+
+ for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
+ if (type == mapp->type)
+ return mapp->name;
+
+ if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
+ return "OEM type";
+ if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
+ return "Reserved";
+
+ return "Unknown type";
+}
+
+static int
+get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
+ char *banner, int banlen)
+{
+ return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
+}
+
+
+/*
+ * These two routines display the FIT table for each node.
+ */
+static void dump_fit_entry(struct seq_file *m, unsigned long *fentry)
+{
+ unsigned type;
+
+ type = FIT_TYPE(fentry[1]);
+ seq_printf(m, "%02x %-25s %x.%02x %016lx %u\n",
+ type,
+ fit_type_name(type),
+ FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
+ fentry[0],
+ /* mult by sixteen to get size in bytes */
+ (unsigned)(fentry[1] & 0xffffff) * 16);
+}
+
+
+/*
+ * We assume that the fit table will be small enough that we can print
+ * the whole thing into one page. (This is true for our default 16kB
+ * pages -- each entry is about 60 chars wide when printed.) I read
+ * somewhere that the maximum size of the FIT is 128 entries, so we're
+ * OK except for 4kB pages (and no one is going to do that on SN
+ * anyway).
+ */
+static int proc_fit_show(struct seq_file *m, void *v)
+{
+ unsigned long nasid = (unsigned long)m->private;
+ unsigned long fentry[2];
+ int index;
+
+ for (index = 0; ; index++) {
+ BUG_ON(index * 60 > PAGE_SIZE);
+ if (get_fit_entry(nasid, index, fentry, NULL, 0))
+ break;
+ dump_fit_entry(m, fentry);
+ }
+ return 0;
+}
+
+static int proc_version_show(struct seq_file *m, void *v)
+{
+ unsigned long nasid = (unsigned long)m->private;
+ unsigned long fentry[2];
+ char banner[128];
+ int index;
+
+ for (index = 0; ; index++) {
+ if (get_fit_entry(nasid, index, fentry, banner,
+ sizeof(banner)))
+ return 0;
+ if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
+ break;
+ }
+
+ seq_printf(m, "%x.%02x\n", FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]));
+
+ if (banner[0])
+ seq_printf(m, "%s\n", banner);
+ return 0;
+}
+
+/* module entry points */
+int __init prominfo_init(void);
+void __exit prominfo_exit(void);
+
+module_init(prominfo_init);
+module_exit(prominfo_exit);
+
+#define NODE_NAME_LEN 11
+
+int __init prominfo_init(void)
+{
+ struct proc_dir_entry *sgi_prominfo_entry;
+ cnodeid_t cnodeid;
+
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
+ if (!sgi_prominfo_entry)
+ return -ENOMEM;
+
+ for_each_online_node(cnodeid) {
+ struct proc_dir_entry *dir;
+ unsigned long nasid;
+ char name[NODE_NAME_LEN];
+
+ sprintf(name, "node%d", cnodeid);
+ dir = proc_mkdir(name, sgi_prominfo_entry);
+ if (!dir)
+ continue;
+ nasid = cnodeid_to_nasid(cnodeid);
+ proc_create_single_data("fit", 0, dir, proc_fit_show,
+ (void *)nasid);
+ proc_create_single_data("version", 0, dir, proc_version_show,
+ (void *)nasid);
+ }
+ return 0;
+}
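+/*
+ * Example (illustrative): the entries created above are read with e.g.
+ *	$ cat /proc/sgi_prominfo/node0/version
+ *	$ cat /proc/sgi_prominfo/node0/fit
+ */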
+
+void __exit prominfo_exit(void)
+{
+ remove_proc_subtree("sgi_prominfo", NULL);
+}
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
new file mode 100644
index 000000000..bebbcc4f8
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -0,0 +1,92 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/types.h>
+#include <asm/sn/shub_mmr.h>
+
+#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
+#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
+#define ALIAS_OFFSET 8
+
+
+ .global sn2_ptc_deadlock_recovery_core
+ .proc sn2_ptc_deadlock_recovery_core
+
+sn2_ptc_deadlock_recovery_core:
+ .regstk 6,0,0,0
+
+ ptc0 = in0
+ data0 = in1
+ ptc1 = in2
+ data1 = in3
+ piowc = in4
+ zeroval = in5
+ piowcphy = r30
+ psrsave = r2
+ scr1 = r16
+ scr2 = r17
+ mask = r18
+
+
+ extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
+ dep piowcphy=-1,piowcphy,63,1
+ movl mask=WRITECOUNTMASK
+ mov r8=r0
+
+1:
+ cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1)
+ // p8 = 1 if shub1, p9 = 1 if shub2
+
+ add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
+ mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
+(p8) st8.rel [scr2]=scr1;;
+(p9) ld8.acq scr1=[scr2];;
+
+5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+
+
+ ////////////// BEGIN PHYSICAL MODE ////////////////////
+ mov psrsave=psr // Disable IC (no PMIs)
+ rsm psr.i | psr.dt | psr.ic;;
+ srlz.i;;
+
+ st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b;;
+
+ tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
+
+(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+ tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+
+ mov psr.l=psrsave;; // Reenable IC
+ srlz.i;;
+ ////////////// END PHYSICAL MODE ////////////////////
+
+(p8) add r8=1,r8
+(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
+
+ br.ret.sptk rp
+ .endp sn2_ptc_deadlock_recovery_core
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
new file mode 100644
index 000000000..b73b0ebf8
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -0,0 +1,584 @@
+/*
+ * SN2 Platform specific SMP Support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/sal.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/tlb.h>
+#include <asm/numa.h>
+#include <asm/hw_irq.h>
+#include <asm/current.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/rw_mmr.h>
+#include <asm/sn/sn_feature_sets.h>
+
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DECLARE_PER_CPU(struct ptc_stats, ptcstats);
+
+static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+
+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
+extern unsigned long
+sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
+void
+sn2_ptc_deadlock_recovery(nodemask_t, short, short, int,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
+
+/*
+ * Note: some of the following is captured here to make debugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
+ */
+#define sn2_ptctest 0
+#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
+#define max_active_pio(sh1) ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
+
+struct ptc_stats {
+ unsigned long ptc_l;
+ unsigned long change_rid;
+ unsigned long shub_ptc_flushes;
+ unsigned long nodes_flushed;
+ unsigned long deadlocks;
+ unsigned long deadlocks2;
+ unsigned long lock_itc_clocks;
+ unsigned long shub_itc_clocks;
+ unsigned long shub_itc_clocks_max;
+ unsigned long shub_ptc_flushes_not_my_mm;
+ unsigned long shub_ipi_flushes;
+ unsigned long shub_ipi_flushes_itc_clocks;
+};
+
+static inline unsigned long wait_piowc(void)
+{
+ volatile unsigned long *piows;
+ unsigned long zeroval, ws;
+
+ piows = pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+ do {
+ cpu_relax();
+ } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
+ return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
+}
+
+/**
+ * sn_migrate - SN-specific task migration actions
+ * @task: Task being migrated to new CPU
+ *
+ * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
+ * Context switching user threads which have memory-mapped MMIO may cause
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
+ * from the previous CPU's Shub before execution resumes on the new CPU.
+ */
+void sn_migrate(struct task_struct *task)
+{
+ pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
+ volatile unsigned long *adr = last_pda->pio_write_status_addr;
+ unsigned long val = last_pda->pio_write_status_val;
+
+ /* Drain PIO writes from old CPU's Shub */
+ while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
+ != val))
+ cpu_relax();
+}
+
+void sn_tlb_migrate_finish(struct mm_struct *mm)
+{
+ /* flush_tlb_mm is inefficient if the mm has more than one user */
+ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+ flush_tlb_mm(mm);
+}
+
+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+ unsigned long itc;
+
+ itc = ia64_get_itc();
+ smp_flush_tlb_cpumask(*mm_cpumask(mm));
+ itc = ia64_get_itc() - itc;
+ __this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc);
+ __this_cpu_inc(ptcstats.shub_ipi_flushes);
+}
+
+/**
+ * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @mm: mm_struct containing virtual address range
+ * @start: start of virtual address range
+ * @end: end of virtual address range
+ * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
+ *
+ * Purges the translation caches of all processors of the given virtual address
+ * range.
+ *
+ * Note:
+ * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ * - cpu_vm_mask is converted into a nodemask of the nodes containing the
+ * cpus in cpu_vm_mask.
+ * - if only one bit is set in cpu_vm_mask & it is the current cpu & the
+ * process is purging its own virtual address range, then only the
+ * local TLB needs to be flushed. This flushing can be done using
+ * ptc.l. This is the common case & avoids the global spinlock.
+ * - if multiple cpus have loaded the context, then flushing has to be
+ * done with ptc.g/MMRs under protection of the global ptc_lock.
+ */
+
+void
+sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long nbits)
+{
+ int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+ int mymm = (mm == current->active_mm && mm == current->mm);
+ int use_cpu_ptcga;
+ volatile unsigned long *ptc0, *ptc1;
+ unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
+ short nix;
+ nodemask_t nodes_flushed;
+ int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+ if (flush_opt > 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ return;
+ }
+
+ nodes_clear(nodes_flushed);
+ i = 0;
+
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ cnode = cpu_to_node(cpu);
+ node_set(cnode, nodes_flushed);
+ lcpu = cpu;
+ i++;
+ }
+
+ if (i == 0)
+ return;
+
+ preempt_disable();
+
+ if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
+ do {
+ ia64_ptcl(start, nbits << 2);
+ start += (1UL << nbits);
+ } while (start < end);
+ ia64_srlz_i();
+ __this_cpu_inc(ptcstats.ptc_l);
+ preempt_enable();
+ return;
+ }
+
+ if (atomic_read(&mm->mm_users) == 1 && mymm) {
+ flush_tlb_mm(mm);
+ __this_cpu_inc(ptcstats.change_rid);
+ preempt_enable();
+ return;
+ }
+
+ if (flush_opt == 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ preempt_enable();
+ return;
+ }
+
+ itc = ia64_get_itc();
+ nix = nodes_weight(nodes_flushed);
+
+ rr_value = (mm->context << 3) | REGION_NUMBER(start);
+
+ shub1 = is_shub1();
+ if (shub1) {
+ data0 = (1UL << SH1_PTC_0_A_SHFT) |
+ (nbits << SH1_PTC_0_PS_SHFT) |
+ (rr_value << SH1_PTC_0_RID_SHFT) |
+ (1UL << SH1_PTC_0_START_SHFT);
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+ ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+ } else {
+ data0 = (1UL << SH2_PTC_A_SHFT) |
+ (nbits << SH2_PTC_PS_SHFT) |
+ (1UL << SH2_PTC_START_SHFT);
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+ (rr_value << SH2_PTC_RID_SHFT));
+ ptc1 = NULL;
+ }
+
+
+ mynasid = get_nasid();
+ use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+ max_active = max_active_pio(shub1);
+
+ itc = ia64_get_itc();
+ spin_lock_irqsave(PTC_LOCK(shub1), flags);
+ itc2 = ia64_get_itc();
+
+ __this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc);
+ __this_cpu_inc(ptcstats.shub_ptc_flushes);
+ __this_cpu_add(ptcstats.nodes_flushed, nix);
+ if (!mymm)
+ __this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm);
+
+ if (use_cpu_ptcga && !mymm) {
+ old_rr = ia64_get_rr(start);
+ ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+ ia64_srlz_d();
+ }
+
+ wait_piowc();
+ do {
+ if (shub1)
+ data1 = start | (1UL << SH1_PTC_1_START_SHFT);
+ else
+ data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
+ deadlock = 0;
+ active = 0;
+ ibegin = 0;
+ i = 0;
+ for_each_node_mask(cnode, nodes_flushed) {
+ nasid = cnodeid_to_nasid(cnode);
+ if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
+ ia64_ptcga(start, nbits << 2);
+ ia64_srlz_i();
+ } else {
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+ pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+ active++;
+ }
+ if (active >= max_active || i == (nix - 1)) {
+ if ((deadlock = wait_piowc())) {
+ if (flush_opt == 1)
+ goto done;
+ sn2_ptc_deadlock_recovery(nodes_flushed, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+ if (reset_max_active_on_deadlock())
+ max_active = 1;
+ }
+ active = 0;
+ ibegin = i + 1;
+ }
+ i++;
+ }
+ start += (1UL << nbits);
+ } while (start < end);
+
+done:
+ itc2 = ia64_get_itc() - itc2;
+ __this_cpu_add(ptcstats.shub_itc_clocks, itc2);
+ if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max))
+ __this_cpu_write(ptcstats.shub_itc_clocks_max, itc2);
+
+ if (old_rr) {
+ ia64_set_rr(start, old_rr);
+ ia64_srlz_d();
+ }
+
+ spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
+
+ if (flush_opt == 1 && deadlock) {
+ __this_cpu_inc(ptcstats.deadlocks);
+ sn2_ipi_flush_all_tlb(mm);
+ }
+
+ preempt_enable();
+}
+
+/*
+ * sn2_ptc_deadlock_recovery
+ *
+ * Recover from PTC deadlocks conditions. Recovery requires stepping thru each
+ * TLB flush transaction. The recovery sequence is somewhat tricky & is
+ * coded in assembly language.
+ */
+
+void
+sn2_ptc_deadlock_recovery(nodemask_t nodes, short ib, short ie, int mynasid,
+ volatile unsigned long *ptc0, unsigned long data0,
+ volatile unsigned long *ptc1, unsigned long data1)
+{
+ short nasid, i;
+ int cnode;
+ unsigned long *piows, zeroval, n;
+
+ __this_cpu_inc(ptcstats.deadlocks);
+
+ piows = (unsigned long *) pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+
+ i = 0;
+ for_each_node_mask(cnode, nodes) {
+ if (i < ib)
+ goto next;
+
+ if (i > ie)
+ break;
+
+ nasid = cnodeid_to_nasid(cnode);
+ if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
+ goto next;
+
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+
+ n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+ __this_cpu_add(ptcstats.deadlocks2, n);
+next:
+ i++;
+ }
+
+}
+
+/**
+ * sn_send_IPI_phys - send an IPI to a Nasid and slice
+ * @nasid: nasid to receive the interrupt (may be outside partition)
+ * @physid: physical cpuid to receive the interrupt.
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ *
+ * Sends an IPI (interprocessor interrupt) to the processor specified by
+ * @physid.
+ *
+ * @delivery_mode can be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
+{
+ long val;
+ unsigned long flags = 0;
+ volatile long *p;
+
+ p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+ val = (1UL << SH_IPI_INT_SEND_SHFT) |
+ (physid << SH_IPI_INT_PID_SHFT) |
+ ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
+ ((long)vector << SH_IPI_INT_IDX_SHFT) |
+ (0x000feeUL << SH_IPI_INT_BASE_SHFT);
+
+ mb();
+ if (enable_shub_wars_1_1()) {
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+ }
+ pio_phys_write_mmr(p, val);
+ if (enable_shub_wars_1_1()) {
+ wait_piowc();
+ spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+ }
+
+}
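+/*
+ * Illustrative packing (hypothetical operands): vector 0xef sent as
+ * IA64_IPI_DM_INT to physid 3 writes SH_IPI_INT with SEND=1, PID=3,
+ * TYPE=INT, IDX=0xef and BASE=0xfee, exactly as composed above.
+ */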
+
+EXPORT_SYMBOL(sn_send_IPI_phys);
+
+/**
+ * sn2_send_IPI - send an IPI to a processor
+ * @cpuid: target of the IPI
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ * @redirect: redirect the IPI?
+ *
+ * Sends an IPI (InterProcessor Interrupt) to the processor specified by
+ * @cpuid. @vector specifies the command to send, while @delivery_mode can
+ * be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+ long physid;
+ int nasid;
+
+ physid = cpu_physical_id(cpuid);
+ nasid = cpuid_to_nasid(cpuid);
+
+ /* the following is used only when starting cpus at boot time */
+ if (unlikely(nasid == -1))
+ ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+
+ sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * sn_cpu_disable_allowed - Determine if a CPU can be disabled.
+ * @cpu: CPU that is requested to be disabled.
+ *
+ * CPU disable is only allowed on SHub2 systems running with a PROM
+ * that supports CPU disable. It is not permitted to disable the boot processor.
+ */
+bool sn_cpu_disable_allowed(int cpu)
+{
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
+ if (cpu != 0)
+ return true;
+ else
+ printk(KERN_WARNING
+ "Disabling the boot processor is not allowed.\n");
+
+ } else
+ printk(KERN_WARNING
+ "CPU disable is not supported on this system.\n");
+
+ return false;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PROC_FS
+
+#define PTC_BASENAME "sgi_sn/ptc_statistics"
+
+static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
+{
+ if (*offset < nr_cpu_ids)
+ return offset;
+ return NULL;
+}
+
+static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
+{
+ (*offset)++;
+ if (*offset < nr_cpu_ids)
+ return offset;
+ return NULL;
+}
+
+static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static int sn2_ptc_seq_show(struct seq_file *file, void *data)
+{
+ struct ptc_stats *stat;
+ int cpu;
+
+ cpu = *(loff_t *) data;
+
+ if (!cpu) {
+ seq_printf(file,
+ "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
+ seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
+ }
+
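+ /* itc clocks are reported as nsec: clocks / cyc_per_usec * 1000 */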
+ if (cpu < nr_cpu_ids && cpu_online(cpu)) {
+ stat = &per_cpu(ptcstats, cpu);
+ seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+ stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
+ stat->deadlocks,
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ stat->shub_ptc_flushes_not_my_mm,
+ stat->deadlocks2,
+ stat->shub_ipi_flushes,
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
+ }
+ return 0;
+}
+
+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+ int cpu;
+ char optstr[64];
+
+ if (count == 0 || count > sizeof(optstr))
+ return -EINVAL;
+ if (copy_from_user(optstr, user, count))
+ return -EFAULT;
+ optstr[count - 1] = '\0';
+ sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+ for_each_online_cpu(cpu)
+ memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+ return count;
+}
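+/*
+ * Example (illustrative): selecting a flush algorithm from user space
+ * also resets the per-cpu counters:
+ *	# echo 2 > /proc/sgi_sn/ptc_statistics	(IPI instead of SHUB ptc)
+ *	# cat /proc/sgi_sn/ptc_statistics
+ */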
+
+static const struct seq_operations sn2_ptc_seq_ops = {
+ .start = sn2_ptc_seq_start,
+ .next = sn2_ptc_seq_next,
+ .stop = sn2_ptc_seq_stop,
+ .show = sn2_ptc_seq_show
+};
+
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &sn2_ptc_seq_ops);
+}
+
+static const struct file_operations proc_sn2_ptc_operations = {
+ .open = sn2_ptc_proc_open,
+ .read = seq_read,
+ .write = sn2_ptc_proc_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct proc_dir_entry *proc_sn2_ptc;
+
+static int __init sn2_ptc_init(void)
+{
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
+ NULL, &proc_sn2_ptc_operations);
+ if (!proc_sn2_ptc) {
+ printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
+ return -EINVAL;
+ }
+ spin_lock_init(&sn2_global_ptc_lock);
+ return 0;
+}
+
+static void __exit sn2_ptc_exit(void)
+{
+ remove_proc_entry(PTC_BASENAME, NULL);
+}
+
+module_init(sn2_ptc_init);
+module_exit(sn2_ptc_exit);
+#endif /* CONFIG_PROC_FS */
+
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
new file mode 100644
index 000000000..55febd659
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -0,0 +1,1004 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
+ *
+ * SGI Altix topology and hardware performance monitoring API.
+ * Mark Goodwin <markgw@sgi.com>.
+ *
+ * Creates /proc/sgi_sn/sn_topology (read-only) to export
+ * info about Altix nodes, routers, CPUs and NumaLink
+ * interconnection/topology.
+ *
+ * Also creates a dynamic misc device named "sn_hwperf"
+ * that supports an ioctl interface to call down into SAL
+ * to discover hw objects, topology and to read/write
+ * memory mapped registers, e.g. for performance monitoring.
+ * The "sn_hwperf" device is registered only after the procfs
+ * file is first opened, i.e. only if/when it's needed.
+ *
+ * This API is used by SGI Performance Co-Pilot and other
+ * tools, see http://oss.sgi.com/projects/pcp
+ */
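+/*
+ * Illustrative user-space size query (hypothetical fd/variable names;
+ * the sizeof(u64) special case is handled by the ioctl code below):
+ *
+ *	u64 needed;
+ *	struct sn_hwperf_ioctl_args a = { 0 };
+ *	int fd = open("/dev/sn_hwperf", O_RDONLY);
+ *	a.sz  = sizeof(u64);
+ *	a.ptr = (u64)&needed;
+ *	ioctl(fd, SN_HWPERF_GET_CPU_INFO, (unsigned long)&a);
+ */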
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/utsname.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
+#include <linux/smp.h>
+#include <linux/mutex.h>
+
+#include <asm/processor.h>
+#include <asm/topology.h>
+#include <linux/uaccess.h>
+#include <asm/sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/module.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/addrs.h>
+
+static void *sn_hwperf_salheap = NULL;
+static int sn_hwperf_obj_cnt = 0;
+static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
+static int sn_hwperf_init(void);
+static DEFINE_MUTEX(sn_hwperf_init_mutex);
+
+#define cnode_possible(n) ((n) < num_cnodes)
+
+static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
+{
+ int e;
+ u64 sz;
+ struct sn_hwperf_object_info *objbuf = NULL;
+
+ if ((e = sn_hwperf_init()) < 0) {
+ printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e);
+ goto out;
+ }
+
+ sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
+ objbuf = vmalloc(sz);
+ if (objbuf == NULL) {
+ printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
+ e = -ENOMEM;
+ goto out;
+ }
+
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
+ 0, sz, (u64) objbuf, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ vfree(objbuf);
+ }
+
+out:
+ *nobj = sn_hwperf_obj_cnt;
+ *ret = objbuf;
+ return e;
+}
+
+static int sn_hwperf_location_to_bpos(char *location,
+ int *rack, int *bay, int *slot, int *slab)
+{
+ char type;
+
+ /* first scan for an old style geoid string */
+ if (sscanf(location, "%03d%c%02d#%d",
+ rack, &type, bay, slab) == 4)
+ *slot = 0;
+ else /* scan for a new bladed geoid string */
+ if (sscanf(location, "%03d%c%02d^%02d#%d",
+ rack, &type, bay, slot, slab) != 5)
+ return -1;
+ /* success */
+ return 0;
+}
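+/*
+ * Illustrative: an old style geoid "001c14#0" parses to rack 1, bay 14,
+ * slab 0 (slot forced to 0); a bladed geoid "001c14^02#0" additionally
+ * yields slot 2.
+ */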
+
+static int sn_hwperf_geoid_to_cnode(char *location)
+{
+ int cnode;
+ geoid_t geoid;
+ moduleid_t module_id;
+ int rack, bay, slot, slab;
+ int this_rack, this_bay, this_slot, this_slab;
+
+ if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
+ return -1;
+
+ /*
+ * FIXME: replace with cleaner for_each_XXX macro which addresses
+ * both compute and IO nodes once ACPI3.0 is available.
+ */
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
+ geoid = cnodeid_get_geoid(cnode);
+ module_id = geo_module(geoid);
+ this_rack = MODULE_GET_RACK(module_id);
+ this_bay = MODULE_GET_BPOS(module_id);
+ this_slot = geo_slot(geoid);
+ this_slab = geo_slab(geoid);
+ if (rack == this_rack && bay == this_bay &&
+ slot == this_slot && slab == this_slab) {
+ break;
+ }
+ }
+
+ return cnode_possible(cnode) ? cnode : -1;
+}
+
+static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
+{
+ if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+ BUG();
+ if (SN_HWPERF_FOREIGN(obj))
+ return -1;
+ return sn_hwperf_geoid_to_cnode(obj->location);
+}
+
+static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
+ struct sn_hwperf_object_info *objs)
+{
+ int ordinal;
+ struct sn_hwperf_object_info *p;
+
+ for (ordinal=0, p=objs; p != obj; p++) {
+ if (SN_HWPERF_FOREIGN(p))
+ continue;
+ if (SN_HWPERF_SAME_OBJTYPE(p, obj))
+ ordinal++;
+ }
+
+ return ordinal;
+}
+
+static const char *slabname_node = "node"; /* SHub asic */
+static const char *slabname_ionode = "ionode"; /* TIO asic */
+static const char *slabname_router = "router"; /* NL3R or NL4R */
+static const char *slabname_other = "other"; /* unknown asic */
+
+static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
+ struct sn_hwperf_object_info *objs, int *ordinal)
+{
+ int isnode;
+ const char *slabname = slabname_other;
+
+ if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
+ slabname = isnode ? slabname_node : slabname_ionode;
+ *ordinal = sn_hwperf_obj_to_cnode(obj);
+ }
+ else {
+ *ordinal = sn_hwperf_generic_ordinal(obj, objs);
+ if (SN_HWPERF_IS_ROUTER(obj))
+ slabname = slabname_router;
+ }
+
+ return slabname;
+}
+
+static void print_pci_topology(struct seq_file *s)
+{
+ char *p;
+ size_t sz;
+ int e;
+
+ for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
+ if (!(p = kmalloc(sz, GFP_KERNEL)))
+ break;
+ e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
+ if (e == SALRET_OK)
+ seq_puts(s, p);
+ kfree(p);
+ if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED)
+ break;
+ }
+}
+
+static inline int sn_hwperf_has_cpus(cnodeid_t node)
+{
+ return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
+}
+
+static inline int sn_hwperf_has_mem(cnodeid_t node)
+{
+ return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
+}
+
+static struct sn_hwperf_object_info *
+sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf,
+ int nobj, int id)
+{
+ int i;
+ struct sn_hwperf_object_info *p = objbuf;
+
+ for (i=0; i < nobj; i++, p++) {
+ if (p->id == id)
+ return p;
+ }
+
+ return NULL;
+
+}
+
+static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf,
+ int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
+{
+ int e;
+ struct sn_hwperf_object_info *nodeobj = NULL;
+ struct sn_hwperf_object_info *op;
+ struct sn_hwperf_object_info *dest;
+ struct sn_hwperf_object_info *router;
+ struct sn_hwperf_port_info ptdata[16];
+ int sz, i, j;
+ cnodeid_t c;
+ int found_mem = 0;
+ int found_cpu = 0;
+
+ if (!cnode_possible(node))
+ return -EINVAL;
+
+ if (sn_hwperf_has_cpus(node)) {
+ if (near_cpu_node)
+ *near_cpu_node = node;
+ found_cpu++;
+ }
+
+ if (sn_hwperf_has_mem(node)) {
+ if (near_mem_node)
+ *near_mem_node = node;
+ found_mem++;
+ }
+
+ if (found_cpu && found_mem)
+ return 0; /* trivially successful */
+
+ /* find the argument node object */
+ for (i=0, op=objbuf; i < nobj; i++, op++) {
+ if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op))
+ continue;
+ if (node == sn_hwperf_obj_to_cnode(op)) {
+ nodeobj = op;
+ break;
+ }
+ }
+ if (!nodeobj) {
+ e = -ENOENT;
+ goto err;
+ }
+
+ /* get its interconnect topology */
+ sz = op->ports * sizeof(struct sn_hwperf_port_info);
+ BUG_ON(sz > sizeof(ptdata));
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
+ (u64)&ptdata, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto err;
+ }
+
+ /* find nearest node with cpus and nearest memory */
+ for (router=NULL, j=0; j < op->ports; j++) {
+ dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);
+ if (dest && SN_HWPERF_IS_ROUTER(dest))
+ router = dest;
+ if (!dest || SN_HWPERF_FOREIGN(dest) ||
+ !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) {
+ continue;
+ }
+ c = sn_hwperf_obj_to_cnode(dest);
+ if (!found_cpu && sn_hwperf_has_cpus(c)) {
+ if (near_cpu_node)
+ *near_cpu_node = c;
+ found_cpu++;
+ }
+ if (!found_mem && sn_hwperf_has_mem(c)) {
+ if (near_mem_node)
+ *near_mem_node = c;
+ found_mem++;
+ }
+ }
+
+ if (router && (!found_cpu || !found_mem)) {
+ /* search for a node connected to the same router */
+ sz = router->ports * sizeof(struct sn_hwperf_port_info);
+ BUG_ON(sz > sizeof(ptdata));
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_ENUM_PORTS, router->id, sz,
+ (u64)&ptdata, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto err;
+ }
+ for (j=0; j < router->ports; j++) {
+ dest = sn_hwperf_findobj_id(objbuf, nobj,
+ ptdata[j].conn_id);
+ if (!dest || dest->id == node ||
+ SN_HWPERF_FOREIGN(dest) ||
+ !SN_HWPERF_IS_NODE(dest) ||
+ SN_HWPERF_IS_IONODE(dest)) {
+ continue;
+ }
+ c = sn_hwperf_obj_to_cnode(dest);
+ if (!found_cpu && sn_hwperf_has_cpus(c)) {
+ if (near_cpu_node)
+ *near_cpu_node = c;
+ found_cpu++;
+ }
+ if (!found_mem && sn_hwperf_has_mem(c)) {
+ if (near_mem_node)
+ *near_mem_node = c;
+ found_mem++;
+ }
+ if (found_cpu && found_mem)
+ break;
+ }
+ }
+
+ if (!found_cpu || !found_mem) {
+ /* resort to _any_ node with CPUs and memory */
+ for (i=0, op=objbuf; i < nobj; i++, op++) {
+ if (SN_HWPERF_FOREIGN(op) ||
+ SN_HWPERF_IS_IONODE(op) ||
+ !SN_HWPERF_IS_NODE(op)) {
+ continue;
+ }
+ c = sn_hwperf_obj_to_cnode(op);
+ if (!found_cpu && sn_hwperf_has_cpus(c)) {
+ if (near_cpu_node)
+ *near_cpu_node = c;
+ found_cpu++;
+ }
+ if (!found_mem && sn_hwperf_has_mem(c)) {
+ if (near_mem_node)
+ *near_mem_node = c;
+ found_mem++;
+ }
+ if (found_cpu && found_mem)
+ break;
+ }
+ }
+
+ if (!found_cpu || !found_mem)
+ e = -ENODATA;
+
+err:
+ return e;
+}
+
+
+static int sn_topology_show(struct seq_file *s, void *d)
+{
+ int sz;
+ int pt;
+ int e = 0;
+ int i;
+ int j;
+ const char *slabname;
+ int ordinal;
+ char slice;
+ struct cpuinfo_ia64 *c;
+ struct sn_hwperf_port_info *ptdata;
+ struct sn_hwperf_object_info *p;
+ struct sn_hwperf_object_info *obj = d; /* this object */
+ struct sn_hwperf_object_info *objs = s->private; /* all objects */
+ u8 shubtype;
+ u8 system_size;
+ u8 sharing_size;
+ u8 partid;
+ u8 coher;
+ u8 nasid_shift;
+ u8 region_size;
+ u16 nasid_mask;
+ int nasid_msb;
+
+ if (obj == objs) {
+ seq_printf(s, "# sn_topology version 2\n");
+ seq_printf(s, "# objtype ordinal location partition"
+ " [attribute value [, ...]]\n");
+
+ if (ia64_sn_get_sn_info(0,
+ &shubtype, &nasid_mask, &nasid_shift, &system_size,
+ &sharing_size, &partid, &coher, &region_size))
+ BUG();
+ for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
+ if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
+ break;
+ }
+ seq_printf(s, "partition %u %s local "
+ "shubtype %s, "
+ "nasid_mask 0x%016llx, "
+ "nasid_bits %d:%d, "
+ "system_size %d, "
+ "sharing_size %d, "
+ "coherency_domain %d, "
+ "region_size %d\n",
+
+ partid, utsname()->nodename,
+ shubtype ? "shub2" : "shub1",
+ (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
+ system_size, sharing_size, coher, region_size);
+
+ print_pci_topology(s);
+ }
+
+ if (SN_HWPERF_FOREIGN(obj)) {
+ /* private in another partition: not interesting */
+ return 0;
+ }
+
+ for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
+ if (obj->name[i] == ' ')
+ obj->name[i] = '_';
+ }
+
+ slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
+ seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
+ obj->sn_hwp_this_part ? "local" : "shared", obj->name);
+
+ if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
+ seq_putc(s, '\n');
+ else {
+ cnodeid_t near_mem = -1;
+ cnodeid_t near_cpu = -1;
+
+ seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
+
+ if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt,
+ ordinal, &near_mem, &near_cpu) == 0) {
+ seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d",
+ near_mem, near_cpu);
+ }
+
+ if (!SN_HWPERF_IS_IONODE(obj)) {
+ for_each_online_node(i) {
+ seq_printf(s, i ? ":%d" : ", dist %d",
+ node_distance(ordinal, i));
+ }
+ }
+
+ seq_putc(s, '\n');
+
+ /*
+ * CPUs on this node, if any
+ */
+ if (!SN_HWPERF_IS_IONODE(obj)) {
+ for_each_cpu_and(i, cpu_online_mask,
+ cpumask_of_node(ordinal)) {
+ slice = 'a' + cpuid_to_slice(i);
+ c = cpu_data(i);
+ seq_printf(s, "cpu %d %s%c local"
+ " freq %luMHz, arch ia64",
+ i, obj->location, slice,
+ c->proc_freq / 1000000);
+ for_each_online_cpu(j) {
+ seq_printf(s, j ? ":%d" : ", dist %d",
+ node_distance(
+ cpu_to_node(i),
+ cpu_to_node(j)));
+ }
+ seq_putc(s, '\n');
+ }
+ }
+ }
+
+ if (obj->ports) {
+ /*
+ * numalink ports
+ */
+ sz = obj->ports * sizeof(struct sn_hwperf_port_info);
+ if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_ENUM_PORTS, obj->id, sz,
+ (u64) ptdata, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK)
+ return -EINVAL;
+ for (ordinal=0, p=objs; p != obj; p++) {
+ if (!SN_HWPERF_FOREIGN(p))
+ ordinal += p->ports;
+ }
+ for (pt = 0; pt < obj->ports; pt++) {
+ for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
+ if (ptdata[pt].conn_id == p->id) {
+ break;
+ }
+ }
+ seq_printf(s, "numalink %d %s-%d",
+ ordinal+pt, obj->location, ptdata[pt].port);
+
+ if (i >= sn_hwperf_obj_cnt) {
+ /* no connection */
+ seq_puts(s, " local endpoint disconnected"
+ ", protocol unknown\n");
+ continue;
+ }
+
+ if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
+ /* both ends local to this partition */
+ seq_puts(s, " local");
+ else if (SN_HWPERF_FOREIGN(p))
+ /* both ends of the link in foreign partition */
+ seq_puts(s, " foreign");
+ else
+ /* link straddles a partition */
+ seq_puts(s, " shared");
+
+ /*
+ * Unlikely, but strictly should query the LLP config
+ * registers because an NL4R can be configured to run
+ * NL3 protocol, even when not talking to an NL3 router.
+ * Ditto for node-node.
+ */
+ seq_printf(s, " endpoint %s-%d, protocol %s\n",
+ p->location, ptdata[pt].conn_port,
+ (SN_HWPERF_IS_NL3ROUTER(obj) ||
+ SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
+ }
+ kfree(ptdata);
+ }
+
+ return 0;
+}
+
+static void *sn_topology_start(struct seq_file *s, loff_t * pos)
+{
+ struct sn_hwperf_object_info *objs = s->private;
+
+ if (*pos < sn_hwperf_obj_cnt)
+ return (void *)(objs + *pos);
+
+ return NULL;
+}
+
+static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
+{
+ ++*pos;
+ return sn_topology_start(s, pos);
+}
+
+static void sn_topology_stop(struct seq_file *m, void *v)
+{
+ return;
+}
+
+/*
+ * /proc/sgi_sn/sn_topology, read-only using seq_file
+ */
+static const struct seq_operations sn_topology_seq_ops = {
+ .start = sn_topology_start,
+ .next = sn_topology_next,
+ .stop = sn_topology_stop,
+ .show = sn_topology_show
+};
+
+struct sn_hwperf_op_info {
+ u64 op;
+ struct sn_hwperf_ioctl_args *a;
+ void *p;
+ int *v0;
+ int ret;
+};
+
+static void sn_hwperf_call_sal(void *info)
+{
+ struct sn_hwperf_op_info *op_info = info;
+ int r;
+
+ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
+ op_info->a->arg, op_info->a->sz,
+ (u64) op_info->p, 0, 0, op_info->v0);
+ op_info->ret = r;
+}
+
+static long sn_hwperf_call_sal_work(void *info)
+{
+ sn_hwperf_call_sal(info);
+ return 0;
+}
+
+static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
+{
+ u32 cpu;
+ u32 use_ipi;
+ int r = 0;
+
+ cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
+ use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
+ op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
+
+ if (cpu != SN_HWPERF_ARG_ANY_CPU) {
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (cpu == SN_HWPERF_ARG_ANY_CPU) {
+ /* don't care which cpu */
+ sn_hwperf_call_sal(op_info);
+ } else if (cpu == get_cpu()) {
+ /* already on correct cpu */
+ sn_hwperf_call_sal(op_info);
+ put_cpu();
+ } else {
+ put_cpu();
+ if (use_ipi) {
+ /* use an interprocessor interrupt to call SAL */
+ smp_call_function_single(cpu, sn_hwperf_call_sal,
+ op_info, 1);
+ } else {
+ /* Call on the target CPU */
+ work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info);
+ }
+ }
+ r = op_info->ret;
+
+out:
+ return r;
+}
+
+/* map SAL hwperf error code to system error code */
+static int sn_hwperf_map_err(int hwperf_err)
+{
+ int e;
+
+ switch(hwperf_err) {
+ case SN_HWPERF_OP_OK:
+ e = 0;
+ break;
+
+ case SN_HWPERF_OP_NOMEM:
+ e = -ENOMEM;
+ break;
+
+ case SN_HWPERF_OP_NO_PERM:
+ e = -EPERM;
+ break;
+
+ case SN_HWPERF_OP_IO_ERROR:
+ e = -EIO;
+ break;
+
+ case SN_HWPERF_OP_BUSY:
+ e = -EBUSY;
+ break;
+
+ case SN_HWPERF_OP_RECONFIGURE:
+ e = -EAGAIN;
+ break;
+
+ case SN_HWPERF_OP_INVAL:
+ default:
+ e = -EINVAL;
+ break;
+ }
+
+ return e;
+}
+
+/*
+ * ioctl for "sn_hwperf" misc device
+ */
+static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
+{
+ struct sn_hwperf_ioctl_args a;
+ struct cpuinfo_ia64 *cdata;
+ struct sn_hwperf_object_info *objs;
+ struct sn_hwperf_object_info *cpuobj;
+ struct sn_hwperf_op_info op_info;
+ void *p = NULL;
+ int nobj;
+ char slice;
+ int node;
+ int r;
+ int v0;
+ int i;
+ int j;
+
+ /* only user requests are allowed here */
+ if ((op & SN_HWPERF_OP_MASK) < 10) {
+ r = -EINVAL;
+ goto error;
+ }
+ r = copy_from_user(&a, (const void __user *)arg,
+ sizeof(struct sn_hwperf_ioctl_args));
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+
+ /*
+ * Allocate memory to hold a kernel copy of the user buffer. The
+ * buffer contents are either copied in or out (or both) of user
+ * space depending on the flags encoded in the requested operation.
+ */
+ if (a.ptr) {
+ p = vmalloc(a.sz);
+ if (!p) {
+ r = -ENOMEM;
+ goto error;
+ }
+ }
+
+ if (op & SN_HWPERF_OP_MEM_COPYIN) {
+ r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+ }
+
+ switch (op) {
+ case SN_HWPERF_GET_CPU_INFO:
+ if (a.sz == sizeof(u64)) {
+ /* special case to get size needed */
+ *(u64 *) p = (u64) num_online_cpus() *
+ sizeof(struct sn_hwperf_object_info);
+ } else
+ if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
+ r = -ENOMEM;
+ goto error;
+ } else
+ if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ int cpuobj_index = 0;
+
+ memset(p, 0, a.sz);
+ for (i = 0; i < nobj; i++) {
+ if (!SN_HWPERF_IS_NODE(objs + i))
+ continue;
+ node = sn_hwperf_obj_to_cnode(objs + i);
+ for_each_online_cpu(j) {
+ if (node != cpu_to_node(j))
+ continue;
+ cpuobj = (struct sn_hwperf_object_info *) p + cpuobj_index++;
+ slice = 'a' + cpuid_to_slice(j);
+ cdata = cpu_data(j);
+ cpuobj->id = j;
+ snprintf(cpuobj->name,
+ sizeof(cpuobj->name),
+ "CPU %luMHz %s",
+ cdata->proc_freq / 1000000,
+ cdata->vendor);
+ snprintf(cpuobj->location,
+ sizeof(cpuobj->location),
+ "%s%c", objs[i].location,
+ slice);
+ }
+ }
+
+ vfree(objs);
+ }
+ break;
+
+ case SN_HWPERF_GET_NODE_NASID:
+ if (a.sz != sizeof(u64) ||
+ (node = a.arg) < 0 || !cnode_possible(node)) {
+ r = -EINVAL;
+ goto error;
+ }
+ *(u64 *)p = (u64)cnodeid_to_nasid(node);
+ break;
+
+ case SN_HWPERF_GET_OBJ_NODE:
+ i = a.arg;
+ if (a.sz != sizeof(u64) || i < 0) {
+ r = -EINVAL;
+ goto error;
+ }
+ if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ if (i >= nobj) {
+ r = -EINVAL;
+ vfree(objs);
+ goto error;
+ }
+ if (objs[i].id != a.arg) {
+ for (i = 0; i < nobj; i++) {
+ if (objs[i].id == a.arg)
+ break;
+ }
+ }
+ if (i == nobj) {
+ r = -EINVAL;
+ vfree(objs);
+ goto error;
+ }
+
+ if (!SN_HWPERF_IS_NODE(objs + i) &&
+ !SN_HWPERF_IS_IONODE(objs + i)) {
+ r = -ENOENT;
+ vfree(objs);
+ goto error;
+ }
+
+ *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
+ vfree(objs);
+ }
+ break;
+
+ case SN_HWPERF_GET_MMRS:
+ case SN_HWPERF_SET_MMRS:
+ case SN_HWPERF_OBJECT_DISTANCE:
+ op_info.p = p;
+ op_info.a = &a;
+ op_info.v0 = &v0;
+ op_info.op = op;
+ r = sn_hwperf_op_cpu(&op_info);
+ if (r) {
+ r = sn_hwperf_map_err(r);
+ a.v0 = v0;
+ goto error;
+ }
+ break;
+
+ default:
+ /* all other ops are a direct SAL call */
+ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
+ a.arg, a.sz, (u64) p, 0, 0, &v0);
+ if (r) {
+ r = sn_hwperf_map_err(r);
+ goto error;
+ }
+ a.v0 = v0;
+ break;
+ }
+
+ if (op & SN_HWPERF_OP_MEM_COPYOUT) {
+ r = copy_to_user((void __user *)a.ptr, p, a.sz);
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+ }
+
+error:
+ vfree(p);
+
+ return r;
+}
+
+static const struct file_operations sn_hwperf_fops = {
+ .unlocked_ioctl = sn_hwperf_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice sn_hwperf_dev = {
+ MISC_DYNAMIC_MINOR,
+ "sn_hwperf",
+ &sn_hwperf_fops
+};
+
+static int sn_hwperf_init(void)
+{
+ u64 v;
+ int salr;
+ int e = 0;
+
+ /* single threaded, once-only initialization */
+ mutex_lock(&sn_hwperf_init_mutex);
+
+ if (sn_hwperf_salheap) {
+ mutex_unlock(&sn_hwperf_init_mutex);
+ return e;
+ }
+
+ /*
+ * The PROM code needs a fixed reference node. For convenience the
+ * same node as the console I/O is used.
+ */
+ sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
+
+ /*
+ * Request the needed size and install the PROM scratch area.
+ * The PROM keeps various tracking bits in this memory area.
+ */
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ (u64) SN_HWPERF_GET_HEAPSIZE, 0,
+ (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+
+ if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
+ e = -ENOMEM;
+ goto out;
+ }
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_INSTALL_HEAP, 0, v,
+ (u64) sn_hwperf_salheap, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_OBJECT_COUNT, 0,
+ sizeof(u64), (u64) &v, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+ sn_hwperf_obj_cnt = (int)v;
+
+out:
+ if (e < 0 && sn_hwperf_salheap) {
+ vfree(sn_hwperf_salheap);
+ sn_hwperf_salheap = NULL;
+ sn_hwperf_obj_cnt = 0;
+ }
+ mutex_unlock(&sn_hwperf_init_mutex);
+ return e;
+}
+
+int sn_topology_open(struct inode *inode, struct file *file)
+{
+ int e;
+ struct seq_file *seq;
+ struct sn_hwperf_object_info *objbuf;
+ int nobj;
+
+ if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
+ e = seq_open(file, &sn_topology_seq_ops);
+ seq = file->private_data;
+ seq->private = objbuf;
+ }
+
+ return e;
+}
+
+int sn_topology_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ vfree(seq->private);
+ return seq_release(inode, file);
+}
+
+int sn_hwperf_get_nearest_node(cnodeid_t node,
+ cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
+{
+ int e;
+ int nobj;
+ struct sn_hwperf_object_info *objbuf;
+
+ if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
+ e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj,
+ node, near_mem_node, near_cpu_node);
+ vfree(objbuf);
+ }
+
+ return e;
+}
+
+static int sn_hwperf_misc_register_init(void)
+{
+ int e;
+
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ sn_hwperf_init();
+
+ /*
+ * Register a dynamic misc device for hwperf ioctls. Platforms
+ * supporting hotplug will create /dev/sn_hwperf, else user
+ * can look up the minor number in /proc/misc.
+ */
+ if ((e = misc_register(&sn_hwperf_dev)) != 0) {
+ printk(KERN_ERR "sn_hwperf_misc_register_init: failed to "
+ "register misc device for \"%s\"\n", sn_hwperf_dev.name);
+ }
+
+ return e;
+}
+
+device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */
+EXPORT_SYMBOL(sn_hwperf_get_nearest_node);
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
new file mode 100644
index 000000000..c2a4d8429
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <asm/sn/sn_sal.h>
+
+static int partition_id_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", sn_partition_id);
+ return 0;
+}
+
+static int system_serial_number_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%s\n", sn_system_serial_number());
+ return 0;
+}
+
+static int licenseID_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "0x%llx\n", sn_partition_serial_number_val());
+ return 0;
+}
+
+static int coherence_id_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", partition_coherence_id());
+
+ return 0;
+}
+
+/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
+extern int sn_topology_open(struct inode *, struct file *);
+extern int sn_topology_release(struct inode *, struct file *);
+
+static const struct file_operations proc_sn_topo_fops = {
+ .open = sn_topology_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = sn_topology_release,
+};
+
+void register_sn_procfs(void)
+{
+ static struct proc_dir_entry *sgi_proc_dir = NULL;
+
+ BUG_ON(sgi_proc_dir != NULL);
+ if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
+ return;
+
+ proc_create_single("partition_id", 0444, sgi_proc_dir,
+ partition_id_show);
+ proc_create_single("system_serial_number", 0444, sgi_proc_dir,
+ system_serial_number_show);
+ proc_create_single("licenseID", 0444, sgi_proc_dir, licenseID_show);
+ proc_create_single("coherence_id", 0444, sgi_proc_dir,
+ coherence_id_show);
+ proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
+}
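+/*
+ * Example (illustrative):
+ *	$ cat /proc/sgi_sn/partition_id
+ *	$ cat /proc/sgi_sn/sn_topology
+ */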
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
new file mode 100644
index 000000000..3009d9d86
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/ia64/sn/kernel/sn2/timer.c
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc.
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+
+#include <asm/hw_irq.h>
+#include <asm/timex.h>
+
+#include <asm/sn/leds.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/clksupport.h>
+
+extern unsigned long sn_rtc_cycles_per_second;
+
+static u64 read_sn2(struct clocksource *cs)
+{
+ return (u64)readq(RTC_COUNTER_ADDR);
+}
+
+static struct clocksource clocksource_sn2 = {
+ .name = "sn2_rtc",
+ .rating = 450,
+ .read = read_sn2,
+ .mask = (1LL << 55) - 1,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
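+/*
+ * Illustrative: the RTC is a 55-bit counter, so at a hypothetical
+ * 50 MHz sn_rtc_cycles_per_second it wraps only once every
+ * 2^55 / 50e6 seconds, i.e. roughly 22 years.
+ */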
+
+/*
+ * sn udelay uses the RTC instead of the ITC because the ITC is not
+ * synchronized across all CPUs, and the thread may migrate to another CPU
+ * if preemption is enabled.
+ */
+static void
+ia64_sn_udelay (unsigned long usecs)
+{
+ unsigned long start = rtc_time();
+ unsigned long end = start +
+ usecs * sn_rtc_cycles_per_second / 1000000;
+
+ while (time_before((unsigned long)rtc_time(), end))
+ cpu_relax();
+}
+
+void __init sn_timer_init(void)
+{
+ clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
+ clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
+
+ ia64_udelay = &ia64_sn_udelay;
+}
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
new file mode 100644
index 000000000..103d6ea8e
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -0,0 +1,60 @@
+/*
+ *
+ *
+ * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/interrupt.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/leds.h>
+
+extern void sn_lb_int_war_check(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+#define SN_LB_INT_WAR_INTERVAL 100
+
+void sn_timer_interrupt(int irq, void *dev_id)
+{
+ /* LED blinking */
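+ /* hb_count ticks down from HZ/2: toggle every half second, ~1Hz blink */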
+ if (!pda->hb_count--) {
+ pda->hb_count = HZ / 2;
+ set_led_bits(pda->hb_state ^=
+ LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
+ }
+
+ if (is_shub1()) {
+ if (enable_shub_wars_1_1()) {
+ /* Bugfix code for SHUB 1.1 */
+ if (pda->pio_shub_war_cam_addr)
+ *pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
+ }
+ if (pda->sn_lb_int_war_ticks == 0)
+ sn_lb_int_war_check();
+ pda->sn_lb_int_war_ticks++;
+ if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
+ pda->sn_lb_int_war_ticks = 0;
+ }
+}