From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- arch/sparc/kernel/.gitignore | 2 + arch/sparc/kernel/Makefile | 123 ++ arch/sparc/kernel/adi_64.c | 396 +++++ arch/sparc/kernel/apc.c | 196 +++ arch/sparc/kernel/asm-offsets.c | 60 + arch/sparc/kernel/audit.c | 82 + arch/sparc/kernel/auxio_32.c | 140 ++ arch/sparc/kernel/auxio_64.c | 150 ++ arch/sparc/kernel/btext.c | 673 ++++++++ arch/sparc/kernel/central.c | 271 +++ arch/sparc/kernel/cherrs.S | 576 +++++++ arch/sparc/kernel/chmc.c | 864 ++++++++++ arch/sparc/kernel/compat_audit.c | 48 + arch/sparc/kernel/cpu.c | 556 ++++++ arch/sparc/kernel/cpumap.c | 439 +++++ arch/sparc/kernel/cpumap.h | 17 + arch/sparc/kernel/devices.c | 138 ++ arch/sparc/kernel/ds.c | 1269 ++++++++++++++ arch/sparc/kernel/dtlb_miss.S | 40 + arch/sparc/kernel/dtlb_prot.S | 55 + arch/sparc/kernel/ebus.c | 257 +++ arch/sparc/kernel/entry.S | 1355 +++++++++++++++ arch/sparc/kernel/entry.h | 253 +++ arch/sparc/kernel/etrap_32.S | 281 +++ arch/sparc/kernel/etrap_64.S | 284 ++++ arch/sparc/kernel/fpu_traps.S | 384 +++++ arch/sparc/kernel/ftrace.c | 133 ++ arch/sparc/kernel/getsetcc.S | 25 + arch/sparc/kernel/head_32.S | 812 +++++++++ arch/sparc/kernel/head_64.S | 969 +++++++++++ arch/sparc/kernel/helpers.S | 66 + arch/sparc/kernel/hvapi.c | 205 +++ arch/sparc/kernel/hvcalls.S | 930 ++++++++++ arch/sparc/kernel/hvtramp.S | 137 ++ arch/sparc/kernel/idprom.c | 99 ++ arch/sparc/kernel/iommu-common.c | 260 +++ arch/sparc/kernel/iommu.c | 766 +++++++++ arch/sparc/kernel/iommu_common.h | 51 + arch/sparc/kernel/ioport.c | 344 ++++ arch/sparc/kernel/irq.h | 102 ++ arch/sparc/kernel/irq_32.c | 363 ++++ arch/sparc/kernel/irq_64.c | 1159 +++++++++++++ arch/sparc/kernel/itlb_miss.S | 40 + arch/sparc/kernel/ivec.S | 52 + arch/sparc/kernel/jump_label.c | 47 + arch/sparc/kernel/kernel.h | 193 +++ arch/sparc/kernel/kgdb_32.c | 172 ++ arch/sparc/kernel/kgdb_64.c | 201 +++ arch/sparc/kernel/kprobes.c | 489 ++++++ arch/sparc/kernel/kstack.h | 84 + arch/sparc/kernel/ktlb.S | 270 +++ arch/sparc/kernel/ldc.c | 2428 ++++++++++++++++++++++++++ arch/sparc/kernel/led.c | 146 ++ arch/sparc/kernel/leon_kernel.c | 511 ++++++ arch/sparc/kernel/leon_pci.c | 89 + arch/sparc/kernel/leon_pci_grpci1.c | 722 ++++++++ arch/sparc/kernel/leon_pci_grpci2.c | 913 ++++++++++ arch/sparc/kernel/leon_pmc.c | 94 + arch/sparc/kernel/leon_smp.c | 470 +++++ arch/sparc/kernel/mdesc.c | 1353 +++++++++++++++ arch/sparc/kernel/misctrap.S | 95 ++ arch/sparc/kernel/module.c | 225 +++ arch/sparc/kernel/nmi.c | 314 ++++ arch/sparc/kernel/of_device_32.c | 433 +++++ arch/sparc/kernel/of_device_64.c | 728 ++++++++ arch/sparc/kernel/of_device_common.c | 180 ++ arch/sparc/kernel/of_device_common.h | 37 + arch/sparc/kernel/pci.c | 1011 +++++++++++ arch/sparc/kernel/pci_common.c | 546 ++++++ arch/sparc/kernel/pci_fire.c | 523 ++++++ arch/sparc/kernel/pci_impl.h | 192 +++ arch/sparc/kernel/pci_msi.c | 447 +++++ arch/sparc/kernel/pci_psycho.c | 618 +++++++ arch/sparc/kernel/pci_sabre.c | 614 +++++++ arch/sparc/kernel/pci_schizo.c | 1510 +++++++++++++++++ arch/sparc/kernel/pci_sun4v.c | 1349 +++++++++++++++ arch/sparc/kernel/pci_sun4v.h | 114 ++ arch/sparc/kernel/pci_sun4v_asm.S | 431 +++++ arch/sparc/kernel/pcic.c | 841 +++++++++ arch/sparc/kernel/pcr.c | 373 ++++ arch/sparc/kernel/perf_event.c | 1877 ++++++++++++++++++++ arch/sparc/kernel/pmc.c | 100 ++ arch/sparc/kernel/power.c | 71 + 
arch/sparc/kernel/process.c | 110 ++ arch/sparc/kernel/process_32.c | 394 +++++ arch/sparc/kernel/process_64.c | 682 ++++++++ arch/sparc/kernel/prom.h | 12 + arch/sparc/kernel/prom_32.c | 323 ++++ arch/sparc/kernel/prom_64.c | 633 +++++++ arch/sparc/kernel/prom_common.c | 158 ++ arch/sparc/kernel/prom_irqtrans.c | 843 +++++++++ arch/sparc/kernel/psycho_common.c | 472 ++++++ arch/sparc/kernel/psycho_common.h | 49 + arch/sparc/kernel/ptrace_32.c | 447 +++++ arch/sparc/kernel/ptrace_64.c | 1177 +++++++++++++ arch/sparc/kernel/reboot.c | 55 + arch/sparc/kernel/rtrap_32.S | 265 +++ arch/sparc/kernel/rtrap_64.S | 378 +++++ arch/sparc/kernel/sbus.c | 678 ++++++++ arch/sparc/kernel/setup_32.c | 421 +++++ arch/sparc/kernel/setup_64.c | 711 ++++++++ arch/sparc/kernel/signal32.c | 784 +++++++++ arch/sparc/kernel/signal_32.c | 565 ++++++ arch/sparc/kernel/signal_64.c | 595 +++++++ arch/sparc/kernel/sigutil.h | 10 + arch/sparc/kernel/sigutil_32.c | 129 ++ arch/sparc/kernel/sigutil_64.c | 102 ++ arch/sparc/kernel/smp_32.c | 389 +++++ arch/sparc/kernel/smp_64.c | 1568 +++++++++++++++++ arch/sparc/kernel/sparc_ksyms.c | 12 + arch/sparc/kernel/spiterrs.S | 241 +++ arch/sparc/kernel/sstate.c | 124 ++ arch/sparc/kernel/stacktrace.c | 89 + arch/sparc/kernel/starfire.c | 112 ++ arch/sparc/kernel/sun4d_irq.c | 519 ++++++ arch/sparc/kernel/sun4d_smp.c | 415 +++++ arch/sparc/kernel/sun4m_irq.c | 478 ++++++ arch/sparc/kernel/sun4m_smp.c | 275 +++ arch/sparc/kernel/sun4v_ivec.S | 357 ++++ arch/sparc/kernel/sun4v_mcd.S | 17 + arch/sparc/kernel/sun4v_tlb_miss.S | 437 +++++ arch/sparc/kernel/sys32.S | 241 +++ arch/sparc/kernel/sys_sparc32.c | 237 +++ arch/sparc/kernel/sys_sparc_32.c | 223 +++ arch/sparc/kernel/sys_sparc_64.c | 709 ++++++++ arch/sparc/kernel/syscalls.S | 301 ++++ arch/sparc/kernel/syscalls/Makefile | 33 + arch/sparc/kernel/syscalls/syscall.tbl | 498 ++++++ arch/sparc/kernel/sysfs.c | 265 +++ arch/sparc/kernel/systbls.h | 103 ++ arch/sparc/kernel/systbls_32.S | 18 + arch/sparc/kernel/systbls_64.S | 29 + arch/sparc/kernel/termios.c | 115 ++ arch/sparc/kernel/time_32.c | 356 ++++ arch/sparc/kernel/time_64.c | 899 ++++++++++ arch/sparc/kernel/trampoline_32.S | 201 +++ arch/sparc/kernel/trampoline_64.S | 414 +++++ arch/sparc/kernel/traps_32.c | 394 +++++ arch/sparc/kernel/traps_64.c | 2924 ++++++++++++++++++++++++++++++++ arch/sparc/kernel/tsb.S | 591 +++++++ arch/sparc/kernel/ttable_32.S | 418 +++++ arch/sparc/kernel/ttable_64.S | 275 +++ arch/sparc/kernel/una_asm_32.S | 154 ++ arch/sparc/kernel/una_asm_64.S | 147 ++ arch/sparc/kernel/unaligned_32.c | 282 +++ arch/sparc/kernel/unaligned_64.c | 709 ++++++++ arch/sparc/kernel/uprobes.c | 318 ++++ arch/sparc/kernel/urtt_fill.S | 105 ++ arch/sparc/kernel/utrap.S | 29 + arch/sparc/kernel/vdso.c | 69 + arch/sparc/kernel/vio.c | 573 +++++++ arch/sparc/kernel/viohs.c | 860 ++++++++++ arch/sparc/kernel/visemul.c | 899 ++++++++++ arch/sparc/kernel/vmlinux.lds.S | 193 +++ arch/sparc/kernel/windows.c | 130 ++ arch/sparc/kernel/winfixup.S | 160 ++ arch/sparc/kernel/wof.S | 366 ++++ arch/sparc/kernel/wuf.S | 313 ++++ 158 files changed, 65810 insertions(+) create mode 100644 arch/sparc/kernel/.gitignore create mode 100644 arch/sparc/kernel/Makefile create mode 100644 arch/sparc/kernel/adi_64.c create mode 100644 arch/sparc/kernel/apc.c create mode 100644 arch/sparc/kernel/asm-offsets.c create mode 100644 arch/sparc/kernel/audit.c create mode 100644 arch/sparc/kernel/auxio_32.c create mode 100644 arch/sparc/kernel/auxio_64.c create mode 100644 arch/sparc/kernel/btext.c 
create mode 100644 arch/sparc/kernel/central.c create mode 100644 arch/sparc/kernel/cherrs.S create mode 100644 arch/sparc/kernel/chmc.c create mode 100644 arch/sparc/kernel/compat_audit.c create mode 100644 arch/sparc/kernel/cpu.c create mode 100644 arch/sparc/kernel/cpumap.c create mode 100644 arch/sparc/kernel/cpumap.h create mode 100644 arch/sparc/kernel/devices.c create mode 100644 arch/sparc/kernel/ds.c create mode 100644 arch/sparc/kernel/dtlb_miss.S create mode 100644 arch/sparc/kernel/dtlb_prot.S create mode 100644 arch/sparc/kernel/ebus.c create mode 100644 arch/sparc/kernel/entry.S create mode 100644 arch/sparc/kernel/entry.h create mode 100644 arch/sparc/kernel/etrap_32.S create mode 100644 arch/sparc/kernel/etrap_64.S create mode 100644 arch/sparc/kernel/fpu_traps.S create mode 100644 arch/sparc/kernel/ftrace.c create mode 100644 arch/sparc/kernel/getsetcc.S create mode 100644 arch/sparc/kernel/head_32.S create mode 100644 arch/sparc/kernel/head_64.S create mode 100644 arch/sparc/kernel/helpers.S create mode 100644 arch/sparc/kernel/hvapi.c create mode 100644 arch/sparc/kernel/hvcalls.S create mode 100644 arch/sparc/kernel/hvtramp.S create mode 100644 arch/sparc/kernel/idprom.c create mode 100644 arch/sparc/kernel/iommu-common.c create mode 100644 arch/sparc/kernel/iommu.c create mode 100644 arch/sparc/kernel/iommu_common.h create mode 100644 arch/sparc/kernel/ioport.c create mode 100644 arch/sparc/kernel/irq.h create mode 100644 arch/sparc/kernel/irq_32.c create mode 100644 arch/sparc/kernel/irq_64.c create mode 100644 arch/sparc/kernel/itlb_miss.S create mode 100644 arch/sparc/kernel/ivec.S create mode 100644 arch/sparc/kernel/jump_label.c create mode 100644 arch/sparc/kernel/kernel.h create mode 100644 arch/sparc/kernel/kgdb_32.c create mode 100644 arch/sparc/kernel/kgdb_64.c create mode 100644 arch/sparc/kernel/kprobes.c create mode 100644 arch/sparc/kernel/kstack.h create mode 100644 arch/sparc/kernel/ktlb.S create mode 100644 arch/sparc/kernel/ldc.c create mode 100644 arch/sparc/kernel/led.c create mode 100644 arch/sparc/kernel/leon_kernel.c create mode 100644 arch/sparc/kernel/leon_pci.c create mode 100644 arch/sparc/kernel/leon_pci_grpci1.c create mode 100644 arch/sparc/kernel/leon_pci_grpci2.c create mode 100644 arch/sparc/kernel/leon_pmc.c create mode 100644 arch/sparc/kernel/leon_smp.c create mode 100644 arch/sparc/kernel/mdesc.c create mode 100644 arch/sparc/kernel/misctrap.S create mode 100644 arch/sparc/kernel/module.c create mode 100644 arch/sparc/kernel/nmi.c create mode 100644 arch/sparc/kernel/of_device_32.c create mode 100644 arch/sparc/kernel/of_device_64.c create mode 100644 arch/sparc/kernel/of_device_common.c create mode 100644 arch/sparc/kernel/of_device_common.h create mode 100644 arch/sparc/kernel/pci.c create mode 100644 arch/sparc/kernel/pci_common.c create mode 100644 arch/sparc/kernel/pci_fire.c create mode 100644 arch/sparc/kernel/pci_impl.h create mode 100644 arch/sparc/kernel/pci_msi.c create mode 100644 arch/sparc/kernel/pci_psycho.c create mode 100644 arch/sparc/kernel/pci_sabre.c create mode 100644 arch/sparc/kernel/pci_schizo.c create mode 100644 arch/sparc/kernel/pci_sun4v.c create mode 100644 arch/sparc/kernel/pci_sun4v.h create mode 100644 arch/sparc/kernel/pci_sun4v_asm.S create mode 100644 arch/sparc/kernel/pcic.c create mode 100644 arch/sparc/kernel/pcr.c create mode 100644 arch/sparc/kernel/perf_event.c create mode 100644 arch/sparc/kernel/pmc.c create mode 100644 arch/sparc/kernel/power.c create mode 100644 
arch/sparc/kernel/process.c create mode 100644 arch/sparc/kernel/process_32.c create mode 100644 arch/sparc/kernel/process_64.c create mode 100644 arch/sparc/kernel/prom.h create mode 100644 arch/sparc/kernel/prom_32.c create mode 100644 arch/sparc/kernel/prom_64.c create mode 100644 arch/sparc/kernel/prom_common.c create mode 100644 arch/sparc/kernel/prom_irqtrans.c create mode 100644 arch/sparc/kernel/psycho_common.c create mode 100644 arch/sparc/kernel/psycho_common.h create mode 100644 arch/sparc/kernel/ptrace_32.c create mode 100644 arch/sparc/kernel/ptrace_64.c create mode 100644 arch/sparc/kernel/reboot.c create mode 100644 arch/sparc/kernel/rtrap_32.S create mode 100644 arch/sparc/kernel/rtrap_64.S create mode 100644 arch/sparc/kernel/sbus.c create mode 100644 arch/sparc/kernel/setup_32.c create mode 100644 arch/sparc/kernel/setup_64.c create mode 100644 arch/sparc/kernel/signal32.c create mode 100644 arch/sparc/kernel/signal_32.c create mode 100644 arch/sparc/kernel/signal_64.c create mode 100644 arch/sparc/kernel/sigutil.h create mode 100644 arch/sparc/kernel/sigutil_32.c create mode 100644 arch/sparc/kernel/sigutil_64.c create mode 100644 arch/sparc/kernel/smp_32.c create mode 100644 arch/sparc/kernel/smp_64.c create mode 100644 arch/sparc/kernel/sparc_ksyms.c create mode 100644 arch/sparc/kernel/spiterrs.S create mode 100644 arch/sparc/kernel/sstate.c create mode 100644 arch/sparc/kernel/stacktrace.c create mode 100644 arch/sparc/kernel/starfire.c create mode 100644 arch/sparc/kernel/sun4d_irq.c create mode 100644 arch/sparc/kernel/sun4d_smp.c create mode 100644 arch/sparc/kernel/sun4m_irq.c create mode 100644 arch/sparc/kernel/sun4m_smp.c create mode 100644 arch/sparc/kernel/sun4v_ivec.S create mode 100644 arch/sparc/kernel/sun4v_mcd.S create mode 100644 arch/sparc/kernel/sun4v_tlb_miss.S create mode 100644 arch/sparc/kernel/sys32.S create mode 100644 arch/sparc/kernel/sys_sparc32.c create mode 100644 arch/sparc/kernel/sys_sparc_32.c create mode 100644 arch/sparc/kernel/sys_sparc_64.c create mode 100644 arch/sparc/kernel/syscalls.S create mode 100644 arch/sparc/kernel/syscalls/Makefile create mode 100644 arch/sparc/kernel/syscalls/syscall.tbl create mode 100644 arch/sparc/kernel/sysfs.c create mode 100644 arch/sparc/kernel/systbls.h create mode 100644 arch/sparc/kernel/systbls_32.S create mode 100644 arch/sparc/kernel/systbls_64.S create mode 100644 arch/sparc/kernel/termios.c create mode 100644 arch/sparc/kernel/time_32.c create mode 100644 arch/sparc/kernel/time_64.c create mode 100644 arch/sparc/kernel/trampoline_32.S create mode 100644 arch/sparc/kernel/trampoline_64.S create mode 100644 arch/sparc/kernel/traps_32.c create mode 100644 arch/sparc/kernel/traps_64.c create mode 100644 arch/sparc/kernel/tsb.S create mode 100644 arch/sparc/kernel/ttable_32.S create mode 100644 arch/sparc/kernel/ttable_64.S create mode 100644 arch/sparc/kernel/una_asm_32.S create mode 100644 arch/sparc/kernel/una_asm_64.S create mode 100644 arch/sparc/kernel/unaligned_32.c create mode 100644 arch/sparc/kernel/unaligned_64.c create mode 100644 arch/sparc/kernel/uprobes.c create mode 100644 arch/sparc/kernel/urtt_fill.S create mode 100644 arch/sparc/kernel/utrap.S create mode 100644 arch/sparc/kernel/vdso.c create mode 100644 arch/sparc/kernel/vio.c create mode 100644 arch/sparc/kernel/viohs.c create mode 100644 arch/sparc/kernel/visemul.c create mode 100644 arch/sparc/kernel/vmlinux.lds.S create mode 100644 arch/sparc/kernel/windows.c create mode 100644 arch/sparc/kernel/winfixup.S create mode 
100644 arch/sparc/kernel/wof.S
 create mode 100644 arch/sparc/kernel/wuf.S

diff --git a/arch/sparc/kernel/.gitignore b/arch/sparc/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/sparc/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
new file mode 100644
index 000000000..0984bb6f0
--- /dev/null
+++ b/arch/sparc/kernel/Makefile
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Makefile for the linux kernel.
+#
+
+asflags-y := -ansi
+ccflags-y := -Werror
+
+# Undefine sparc when processing vmlinux.lds - it is used
+# And teach CPP we are doing $(BITS) builds (for this case)
+CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
+extra-y += vmlinux.lds
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
+obj-y := head_$(BITS).o
+obj-$(CONFIG_SPARC64) += urtt_fill.o
+obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
+obj-$(CONFIG_SPARC32) += etrap_32.o
+obj-$(CONFIG_SPARC32) += rtrap_32.o
+obj-y += traps_$(BITS).o
+
+# IRQ
+obj-y += irq_$(BITS).o
+obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4d_irq.o
+
+obj-y += process_$(BITS).o
+obj-y += process.o
+obj-y += signal_$(BITS).o
+obj-y += sigutil_$(BITS).o
+obj-$(CONFIG_SPARC32) += ioport.o
+obj-y += setup_$(BITS).o
+obj-y += idprom.o
+obj-y += sys_sparc_$(BITS).o
+obj-$(CONFIG_SPARC32) += systbls_32.o
+obj-y += time_$(BITS).o
+obj-$(CONFIG_SPARC32) += windows.o
+obj-y += cpu.o
+obj-$(CONFIG_SPARC64) += vdso.o
+obj-$(CONFIG_SPARC32) += devices.o
+obj-y += ptrace_$(BITS).o
+obj-y += unaligned_$(BITS).o
+obj-y += una_asm_$(BITS).o
+obj-y += prom_common.o
+obj-y += prom_$(BITS).o
+obj-y += of_device_common.o
+obj-y += of_device_$(BITS).o
+obj-$(CONFIG_SPARC64) += prom_irqtrans.o
+
+obj-$(CONFIG_SPARC32) += leon_kernel.o
+obj-$(CONFIG_SPARC32) += leon_pmc.o
+
+obj-$(CONFIG_SPARC64) += reboot.o
+obj-$(CONFIG_SPARC64) += sysfs.o
+obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o
+obj-$(CONFIG_SPARC64) += central.o
+obj-$(CONFIG_SPARC64) += starfire.o
+obj-$(CONFIG_SPARC64) += power.o
+obj-$(CONFIG_SPARC64) += sbus.o
+obj-$(CONFIG_SPARC64) += ebus.o
+obj-$(CONFIG_SPARC64) += visemul.o
+obj-$(CONFIG_SPARC64) += hvapi.o
+obj-$(CONFIG_SPARC64) += sstate.o
+obj-$(CONFIG_SPARC64) += mdesc.o
+obj-$(CONFIG_SPARC64) += adi_64.o
+obj-$(CONFIG_SPARC64) += pcr.o
+obj-$(CONFIG_SPARC64) += nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
+
+obj-$(CONFIG_PCIC_PCI) += pcic.o
+obj-$(CONFIG_LEON_PCI) += leon_pci.o
+obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o
+obj-$(CONFIG_SPARC_GRPCI1)+= leon_pci_grpci1.o
+
+obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
+obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
+obj-$(CONFIG_SPARC64_SMP) += hvtramp.o
+
+obj-y += auxio_$(BITS).o
+obj-$(CONFIG_SUN_PM) += apc.o pmc.o
+
+obj-y += termios.o
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_MODULES) += sparc_ksyms.o
+obj-$(CONFIG_SPARC_LED) += led.o
+obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
+
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+
+obj-$(CONFIG_EARLYFB) += btext.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+# sparc64 PCI
+obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
+obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
+obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
+obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
+
+
+obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
+
+obj-$(CONFIG_US3_MC) += chmc.o
+
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
+
+obj-$(CONFIG_AUDIT) += audit.o
+audit--$(CONFIG_AUDIT) := compat_audit.o
+obj-$(CONFIG_COMPAT) += $(audit--y)
+
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
+obj-$(CONFIG_SPARC64) += $(pc--y)
+
+obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/sparc/kernel/adi_64.c b/arch/sparc/kernel/adi_64.c
new file mode 100644
index 000000000..ce332942d
--- /dev/null
+++ b/arch/sparc/kernel/adi_64.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* adi_64.c: support for ADI (Application Data Integrity) feature on
+ * sparc m7 and newer processors. This feature is also known as
+ * SSM (Silicon Secured Memory).
+ *
+ * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
+ * Author: Khalid Aziz (khalid.aziz@oracle.com)
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Each page of storage for ADI tags can accommodate tags for 128
+ * pages. When ADI enabled pages are being swapped out, it would be
+ * prudent to allocate at least enough tag storage space to accommodate
+ * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
+ * store tags for four SWAPFILE_CLUSTER pages to reduce need for
+ * further allocations for same vma.
+ */
+#define TAG_STORAGE_PAGES 8
+
+struct adi_config adi_state;
+EXPORT_SYMBOL(adi_state);
+
+/* mdesc_adi_init() : Parse machine description provided by the
+ * hypervisor to detect ADI capabilities
+ *
+ * Hypervisor reports ADI capabilities of platform in "hwcap-list" property
+ * for "cpu" node. If the platform supports ADI, "hwcap-list" property
+ * contains the keyword "adp". If the platform supports ADI, "platform"
+ * node will contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
+ * to describe the ADI capabilities.
+ */
+void __init mdesc_adi_init(void)
+{
+	struct mdesc_handle *hp = mdesc_grab();
+	const char *prop;
+	u64 pn, *val;
+	int len;
+
+	if (!hp)
+		goto adi_not_found;
+
+	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
+	if (pn == MDESC_NODE_NULL)
+		goto adi_not_found;
+
+	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
+	if (!prop)
+		goto adi_not_found;
+
+	/*
+	 * Look for "adp" keyword in hwcap-list which would indicate
+	 * ADI support
+	 */
+	adi_state.enabled = false;
+	while (len) {
+		int plen;
+
+		if (!strcmp(prop, "adp")) {
+			adi_state.enabled = true;
+			break;
+		}
+
+		plen = strlen(prop) + 1;
+		prop += plen;
+		len -= plen;
+	}
+
+	if (!adi_state.enabled)
+		goto adi_not_found;
+
+	/* Find the ADI properties in "platform" node. If all ADI
+	 * properties are not found, ADI support is incomplete and
+	 * do not enable ADI in the kernel.
+	 */
+	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
+	if (pn == MDESC_NODE_NULL)
+		goto adi_not_found;
+
+	val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
+	if (!val)
+		goto adi_not_found;
+	adi_state.caps.blksz = *val;
+
+	val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
+	if (!val)
+		goto adi_not_found;
+	adi_state.caps.nbits = *val;
+
+	val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
+	if (!val)
+		goto adi_not_found;
+	adi_state.caps.ue_on_adi = *val;
+
+	/* Some of the code to support swapping ADI tags is written with the
+	 * assumption that two ADI tags can fit inside one byte. If
+	 * this assumption is broken by a future architecture change,
+	 * that code will have to be revisited. If that were to happen,
+	 * disable ADI support so we do not get unpredictable results
+	 * with programs trying to use ADI and their pages getting
+	 * swapped out
+	 */
+	if (adi_state.caps.nbits > 4) {
+		pr_warn("WARNING: ADI tag size >4 on this platform. Disabling ADI support\n");
+		adi_state.enabled = false;
+	}
+
+	mdesc_release(hp);
+	return;
+
+adi_not_found:
+	adi_state.enabled = false;
+	adi_state.caps.blksz = 0;
+	adi_state.caps.nbits = 0;
+	if (hp)
+		mdesc_release(hp);
+}
+
+tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
+				   struct vm_area_struct *vma,
+				   unsigned long addr)
+{
+	tag_storage_desc_t *tag_desc = NULL;
+	unsigned long i, max_desc, flags;
+
+	/* Check if this vma already has tag storage descriptor
+	 * allocated for it.
+	 */
+	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+	if (mm->context.tag_store) {
+		tag_desc = mm->context.tag_store;
+		spin_lock_irqsave(&mm->context.tag_lock, flags);
+		for (i = 0; i < max_desc; i++) {
+			if ((addr >= tag_desc->start) &&
+			    ((addr + PAGE_SIZE - 1) <= tag_desc->end))
+				break;
+			tag_desc++;
+		}
+		spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+
+		/* If no matching entries were found, this must be a
+		 * freshly allocated page
+		 */
+		if (i >= max_desc)
+			tag_desc = NULL;
+	}
+
+	return tag_desc;
+}
+
+tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
+				    struct vm_area_struct *vma,
+				    unsigned long addr)
+{
+	unsigned char *tags;
+	unsigned long i, size, max_desc, flags;
+	tag_storage_desc_t *tag_desc, *open_desc;
+	unsigned long end_addr, hole_start, hole_end;
+
+	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+	open_desc = NULL;
+	hole_start = 0;
+	hole_end = ULONG_MAX;
+	end_addr = addr + PAGE_SIZE - 1;
+
+	/* Check if this vma already has tag storage descriptor
+	 * allocated for it.
+	 */
+	spin_lock_irqsave(&mm->context.tag_lock, flags);
+	if (mm->context.tag_store) {
+		tag_desc = mm->context.tag_store;
+
+		/* Look for a matching entry for this address. While doing
+		 * that, look for the first open slot as well and find
+		 * the hole in already allocated range where this request
+		 * will fit in.
+		 */
+		for (i = 0; i < max_desc; i++) {
+			if (tag_desc->tag_users == 0) {
+				if (open_desc == NULL)
+					open_desc = tag_desc;
+			} else {
+				if ((addr >= tag_desc->start) &&
+				    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
+					tag_desc->tag_users++;
+					goto out;
+				}
+			}
+			if ((tag_desc->start > end_addr) &&
+			    (tag_desc->start < hole_end))
+				hole_end = tag_desc->start;
+			if ((tag_desc->end < addr) &&
+			    (tag_desc->end > hole_start))
+				hole_start = tag_desc->end;
+			tag_desc++;
+		}
+
+	} else {
+		size = sizeof(tag_storage_desc_t)*max_desc;
+		mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
+		if (mm->context.tag_store == NULL) {
+			tag_desc = NULL;
+			goto out;
+		}
+		tag_desc = mm->context.tag_store;
+		for (i = 0; i < max_desc; i++, tag_desc++)
+			tag_desc->tag_users = 0;
+		open_desc = mm->context.tag_store;
+		i = 0;
+	}
+
+	/* Check if we ran out of tag storage descriptors */
+	if (open_desc == NULL) {
+		tag_desc = NULL;
+		goto out;
+	}
+
+	/* Mark this tag descriptor slot in use and then initialize it */
+	tag_desc = open_desc;
+	tag_desc->tag_users = 1;
+
+	/* Tag storage has not been allocated for this vma and space
+	 * is available in tag storage descriptor. Since this page is
+	 * being swapped out, there is high probability subsequent pages
+	 * in the VMA will be swapped out as well. Allocate pages to
+	 * store tags for as many pages in this vma as possible but not
+	 * more than TAG_STORAGE_PAGES. Each byte in tag space holds
+	 * two ADI tags since each ADI tag is 4 bits. Each ADI tag
+	 * covers adi_blksize() worth of addresses. Check if the hole is
+	 * big enough to accommodate full address range for using
+	 * TAG_STORAGE_PAGES number of tag pages.
+	 */
+	size = TAG_STORAGE_PAGES * PAGE_SIZE;
+	end_addr = addr + (size*2*adi_blksize()) - 1;
+	/* Check for overflow. If overflow occurs, allocate only one page */
+	if (end_addr < addr) {
+		size = PAGE_SIZE;
+		end_addr = addr + (size*2*adi_blksize()) - 1;
+		/* If overflow happens with the minimum tag storage
+		 * allocation as well, adjust ending address for this
+		 * tag storage.
+		 */
+		if (end_addr < addr)
+			end_addr = ULONG_MAX;
+	}
+	if (hole_end < end_addr) {
+		/* Available hole is too small on the upper end of
+		 * address. Can we expand the range towards the lower
+		 * address and maximize use of this slot?
+		 */
+		unsigned long tmp_addr;
+
+		end_addr = hole_end - 1;
+		tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
+		/* Check for underflow. If underflow occurs, allocate
+		 * only one page for storing ADI tags
+		 */
+		if (tmp_addr > addr) {
+			size = PAGE_SIZE;
+			tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
+			/* If underflow happens with the minimum tag storage
+			 * allocation as well, adjust starting address for
+			 * this tag storage.
+			 */
+			if (tmp_addr > addr)
+				tmp_addr = 0;
+		}
+		if (tmp_addr < hole_start) {
+			/* Available hole is restricted on lower address
+			 * end as well
+			 */
+			tmp_addr = hole_start + 1;
+		}
+		addr = tmp_addr;
+		size = (end_addr + 1 - addr)/(2*adi_blksize());
+		size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
+		size = size * PAGE_SIZE;
+	}
+	tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
+	if (tags == NULL) {
+		tag_desc->tag_users = 0;
+		tag_desc = NULL;
+		goto out;
+	}
+	tag_desc->start = addr;
+	tag_desc->tags = tags;
+	tag_desc->end = end_addr;
+
+out:
+	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+	return tag_desc;
+}
+
+void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
+{
+	unsigned long flags;
+	unsigned char *tags = NULL;
+
+	spin_lock_irqsave(&mm->context.tag_lock, flags);
+	tag_desc->tag_users--;
+	if (tag_desc->tag_users == 0) {
+		tag_desc->start = tag_desc->end = 0;
+		/* Do not free up the tag storage space allocated
+		 * by the first descriptor. This is persistent
+		 * emergency tag storage space for the task.
+		 */
+		if (tag_desc != mm->context.tag_store) {
+			tags = tag_desc->tags;
+			tag_desc->tags = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+	kfree(tags);
+}
+
+#define tag_start(addr, tag_desc) \
+	((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
+
+/* Retrieve any saved ADI tags for the page being swapped back in and
+ * restore these tags to the newly allocated physical page.
+ */
+void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pte_t pte)
+{
+	unsigned char *tag;
+	tag_storage_desc_t *tag_desc;
+	unsigned long paddr, tmp, version1, version2;
+
+	/* Check if the swapped out page has an ADI version
+	 * saved. If yes, restore version tag to the newly
+	 * allocated page.
+	 */
+	tag_desc = find_tag_store(mm, vma, addr);
+	if (tag_desc == NULL)
+		return;
+
+	tag = tag_start(addr, tag_desc);
+	paddr = pte_val(pte) & _PAGE_PADDR_4V;
+	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+		version1 = (*tag) >> 4;
+		version2 = (*tag) & 0x0f;
+		*tag++ = 0;
+		asm volatile("stxa %0, [%1] %2\n\t"
+			:
+			: "r" (version1), "r" (tmp),
+			  "i" (ASI_MCD_REAL));
+		tmp += adi_blksize();
+		asm volatile("stxa %0, [%1] %2\n\t"
+			:
+			: "r" (version2), "r" (tmp),
+			  "i" (ASI_MCD_REAL));
+	}
+	asm volatile("membar #Sync\n\t");
+
+	/* Check and mark this tag space for release later if
+	 * the swapped in page was the last user of tag space
+	 */
+	del_tag_store(tag_desc, mm);
+}
+
+/* A page is about to be swapped out. Save any ADI tags associated with
+ * this physical page so they can be restored later when the page is swapped
+ * back in.
+ */
+int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+		  unsigned long addr, pte_t oldpte)
+{
+	unsigned char *tag;
+	tag_storage_desc_t *tag_desc;
+	unsigned long version1, version2, paddr, tmp;
+
+	tag_desc = alloc_tag_store(mm, vma, addr);
+	if (tag_desc == NULL)
+		return -1;
+
+	tag = tag_start(addr, tag_desc);
+	paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
+	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+		asm volatile("ldxa [%1] %2, %0\n\t"
+			: "=r" (version1)
+			: "r" (tmp), "i" (ASI_MCD_REAL));
+		tmp += adi_blksize();
+		asm volatile("ldxa [%1] %2, %0\n\t"
+			: "=r" (version2)
+			: "r" (tmp), "i" (ASI_MCD_REAL));
+		*tag = (version1 << 4) | version2;
+		tag++;
+	}
+
+	return 0;
+}
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
new file mode 100644
index 000000000..ecd05bc0a
--- /dev/null
+++ b/arch/sparc/kernel/apc.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/* apc - Driver implementation for power management functions
+ * of Aurora Personality Chip (APC) on SPARCstation-4/5 and
+ * derivatives.
+ *
+ * Copyright (c) 2002 Eric Brower (ebrower@usa.net)
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Debugging
+ *
+ * #define APC_DEBUG_LED
+ */
+
+#define APC_MINOR	MISC_DYNAMIC_MINOR
+#define APC_OBPNAME	"power-management"
+#define APC_DEVNAME	"apc"
+
+static u8 __iomem *regs;
+static int apc_no_idle = 0;
+
+#define apc_readb(offs)		(sbus_readb(regs+offs))
+#define apc_writeb(val, offs)	(sbus_writeb(val, regs+offs))
+
+/* Specify "apc=noidle" on the kernel command line to
+ * disable APC CPU standby support. Certain prototype
+ * systems (SPARCstation-Fox) do not play well with APC
+ * CPU idle, so disable this if your system has APC and
+ * crashes randomly.
+ */ +static int __init apc_setup(char *str) +{ + if(!strncmp(str, "noidle", strlen("noidle"))) { + apc_no_idle = 1; + return 1; + } + return 0; +} +__setup("apc=", apc_setup); + +/* + * CPU idle callback function + * See .../arch/sparc/kernel/process.c + */ +static void apc_swift_idle(void) +{ +#ifdef APC_DEBUG_LED + set_auxio(0x00, AUXIO_LED); +#endif + + apc_writeb(apc_readb(APC_IDLE_REG) | APC_IDLE_ON, APC_IDLE_REG); + +#ifdef APC_DEBUG_LED + set_auxio(AUXIO_LED, 0x00); +#endif +} + +static inline void apc_free(struct platform_device *op) +{ + of_iounmap(&op->resource[0], regs, resource_size(&op->resource[0])); +} + +static int apc_open(struct inode *inode, struct file *f) +{ + return 0; +} + +static int apc_release(struct inode *inode, struct file *f) +{ + return 0; +} + +static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) +{ + __u8 inarg, __user *arg = (__u8 __user *) __arg; + + switch (cmd) { + case APCIOCGFANCTL: + if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) + return -EFAULT; + break; + + case APCIOCGCPWR: + if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) + return -EFAULT; + break; + + case APCIOCGBPORT: + if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) + return -EFAULT; + break; + + case APCIOCSFANCTL: + if (get_user(inarg, arg)) + return -EFAULT; + apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG); + break; + + case APCIOCSCPWR: + if (get_user(inarg, arg)) + return -EFAULT; + apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG); + break; + + case APCIOCSBPORT: + if (get_user(inarg, arg)) + return -EFAULT; + apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static const struct file_operations apc_fops = { + .unlocked_ioctl = apc_ioctl, + .open = apc_open, + .release = apc_release, + .llseek = noop_llseek, +}; + +static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; + +static int apc_probe(struct platform_device *op) +{ + int err; + + regs = of_ioremap(&op->resource[0], 0, + resource_size(&op->resource[0]), APC_OBPNAME); + if (!regs) { + printk(KERN_ERR "%s: unable to map registers\n", APC_DEVNAME); + return -ENODEV; + } + + err = misc_register(&apc_miscdev); + if (err) { + printk(KERN_ERR "%s: unable to register device\n", APC_DEVNAME); + apc_free(op); + return -ENODEV; + } + + /* Assign power management IDLE handler */ + if (!apc_no_idle) + sparc_idle = apc_swift_idle; + + printk(KERN_INFO "%s: power management initialized%s\n", + APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : ""); + + return 0; +} + +static const struct of_device_id apc_match[] = { + { + .name = APC_OBPNAME, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, apc_match); + +static struct platform_driver apc_driver = { + .driver = { + .name = "apc", + .of_match_table = apc_match, + }, + .probe = apc_probe, +}; + +static int __init apc_init(void) +{ + return platform_driver_register(&apc_driver); +} + +/* This driver is not critical to the boot process + * and is easiest to ioremap when SBus is already + * initialized, so we install ourselves thusly: + */ +__initcall(apc_init); diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c new file mode 100644 index 000000000..5784f2df4 --- /dev/null +++ b/arch/sparc/kernel/asm-offsets.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is used to generate definitions needed by + * assembly language modules. 
+ * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + * + * On sparc, thread_info data is static and TI_XXX offsets are computed by hand. + */ + +#include +#include +// #include +#include + +#include + +#ifdef CONFIG_SPARC32 +int sparc32_foo(void) +{ + DEFINE(AOFF_thread_fork_kpsr, + offsetof(struct thread_struct, fork_kpsr)); + return 0; +} +#else +int sparc64_foo(void) +{ +#ifdef CONFIG_HIBERNATION + BLANK(); + OFFSET(SC_REG_FP, saved_context, fp); + OFFSET(SC_REG_CWP, saved_context, cwp); + OFFSET(SC_REG_WSTATE, saved_context, wstate); + + OFFSET(SC_REG_TICK, saved_context, tick); + OFFSET(SC_REG_PSTATE, saved_context, pstate); + + OFFSET(SC_REG_G4, saved_context, g4); + OFFSET(SC_REG_G5, saved_context, g5); + OFFSET(SC_REG_G6, saved_context, g6); +#endif + return 0; +} +#endif + +int foo(void) +{ + BLANK(); + DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); + BLANK(); + DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); + + /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ + return 0; +} + diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c new file mode 100644 index 000000000..b092274ec --- /dev/null +++ b/arch/sparc/kernel/audit.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include "kernel.h" + +static unsigned int dir_class[] = { +#include +~0U +}; + +static unsigned int read_class[] = { +#include +~0U +}; + +static unsigned int write_class[] = { +#include +~0U +}; + +static unsigned int chattr_class[] = { +#include +~0U +}; + +static unsigned int signal_class[] = { +#include +~0U +}; + +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_COMPAT + if (arch == AUDIT_ARCH_SPARC) + return 1; +#endif + return 0; +} + +int audit_classify_syscall(int abi, unsigned int syscall) +{ +#ifdef CONFIG_COMPAT + if (abi == AUDIT_ARCH_SPARC) + return sparc32_classify_syscall(syscall); +#endif + switch(syscall) { + case __NR_open: + return AUDITSC_OPEN; + case __NR_openat: + return AUDITSC_OPENAT; + case __NR_socketcall: + return AUDITSC_SOCKETCALL; + case __NR_execve: + return AUDITSC_EXECVE; + case __NR_openat2: + return AUDITSC_OPENAT2; + default: + return AUDITSC_NATIVE; + } +} + +static int __init audit_classes_init(void) +{ +#ifdef CONFIG_COMPAT + audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class); + audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class); + audit_register_class(AUDIT_CLASS_CHATTR_32, sparc32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, sparc32_signal_class); +#endif + audit_register_class(AUDIT_CLASS_WRITE, write_class); + audit_register_class(AUDIT_CLASS_READ, read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); + return 0; +} + +__initcall(audit_classes_init); diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c new file mode 100644 index 000000000..a32d58817 --- /dev/null +++ b/arch/sparc/kernel/auxio_32.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* auxio.c: Probing for the Sparc AUXIO register at boot time. + * + * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include /* memset(), Linux has no bzero() */ +#include + +#include "kernel.h" + +/* Probe and map in the Auxiliary I/O register */ + +/* auxio_register is not static because it is referenced + * in entry.S::floppy_tdone + */ +void __iomem *auxio_register = NULL; +static DEFINE_SPINLOCK(auxio_lock); + +void __init auxio_probe(void) +{ + phandle node, auxio_nd; + struct linux_prom_registers auxregs[1]; + struct resource r; + + switch (sparc_cpu_model) { + case sparc_leon: + case sun4d: + return; + default: + break; + } + node = prom_getchild(prom_root_node); + auxio_nd = prom_searchsiblings(node, "auxiliary-io"); + if(!auxio_nd) { + node = prom_searchsiblings(node, "obio"); + node = prom_getchild(node); + auxio_nd = prom_searchsiblings(node, "auxio"); + if(!auxio_nd) { +#ifdef CONFIG_PCI + /* There may be auxio on Ebus */ + return; +#else + if(prom_searchsiblings(node, "leds")) { + /* VME chassis sun4m machine, no auxio exists. */ + return; + } + prom_printf("Cannot find auxio node, cannot continue...\n"); + prom_halt(); +#endif + } + } + if(prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0) + return; + prom_apply_obio_ranges(auxregs, 0x1); + /* Map the register both read and write */ + r.flags = auxregs[0].which_io & 0xF; + r.start = auxregs[0].phys_addr; + r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1; + auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio"); + /* Fix the address on sun4m. */ + if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3) + auxio_register += (3 - ((unsigned long)auxio_register & 3)); + + set_auxio(AUXIO_LED, 0); +} + +unsigned char get_auxio(void) +{ + if(auxio_register) + return sbus_readb(auxio_register); + return 0; +} +EXPORT_SYMBOL(get_auxio); + +void set_auxio(unsigned char bits_on, unsigned char bits_off) +{ + unsigned char regval; + unsigned long flags; + spin_lock_irqsave(&auxio_lock, flags); + switch (sparc_cpu_model) { + case sun4m: + if(!auxio_register) + break; /* VME chassis sun4m, no auxio. */ + regval = sbus_readb(auxio_register); + sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M, + auxio_register); + break; + case sun4d: + break; + default: + panic("Can't set AUXIO register on this machine."); + } + spin_unlock_irqrestore(&auxio_lock, flags); +} +EXPORT_SYMBOL(set_auxio); + +/* sun4m power control register (AUXIO2) */ + +volatile u8 __iomem *auxio_power_register = NULL; + +void __init auxio_power_probe(void) +{ + struct linux_prom_registers regs; + phandle node; + struct resource r; + + /* Attempt to find the sun4m power control node. */ + node = prom_getchild(prom_root_node); + node = prom_searchsiblings(node, "obio"); + node = prom_getchild(node); + node = prom_searchsiblings(node, "power"); + if (node == 0 || (s32)node == -1) + return; + + /* Map the power control register. */ + if (prom_getproperty(node, "reg", (char *)®s, sizeof(regs)) <= 0) + return; + prom_apply_obio_ranges(®s, 1); + memset(&r, 0, sizeof(r)); + r.flags = regs.which_io & 0xF; + r.start = regs.phys_addr; + r.end = regs.phys_addr + regs.reg_size - 1; + auxio_power_register = + (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower"); + + /* Display a quick message on the console. 
 */
+	if (auxio_power_register)
+		printk(KERN_INFO "Power off control detected.\n");
+}
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
new file mode 100644
index 000000000..774a82b0c
--- /dev/null
+++ b/arch/sparc/kernel/auxio_64.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* auxio.c: Probing for the Sparc AUXIO register at boot time.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+void __iomem *auxio_register = NULL;
+EXPORT_SYMBOL(auxio_register);
+
+enum auxio_type {
+	AUXIO_TYPE_NODEV,
+	AUXIO_TYPE_SBUS,
+	AUXIO_TYPE_EBUS
+};
+
+static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
+static DEFINE_SPINLOCK(auxio_lock);
+
+static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
+{
+	if (auxio_register) {
+		unsigned long flags;
+		u8 regval, newval;
+
+		spin_lock_irqsave(&auxio_lock, flags);
+
+		regval = (ebus ?
+			  (u8) readl(auxio_register) :
+			  sbus_readb(auxio_register));
+		newval = regval | bits_on;
+		newval &= ~bits_off;
+		if (!ebus)
+			newval &= ~AUXIO_AUX1_MASK;
+		if (ebus)
+			writel((u32) newval, auxio_register);
+		else
+			sbus_writeb(newval, auxio_register);
+
+		spin_unlock_irqrestore(&auxio_lock, flags);
+	}
+}
+
+static void __auxio_set_bit(u8 bit, int on, int ebus)
+{
+	u8 bits_on = bit;
+	u8 bits_off = 0;
+
+	if (!on) {
+		u8 tmp = bits_off;
+		bits_off = bits_on;
+		bits_on = tmp;
+	}
+	__auxio_rmw(bits_on, bits_off, ebus);
+}
+
+void auxio_set_led(int on)
+{
+	int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
+	u8 bit;
+
+	bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
+	__auxio_set_bit(bit, on, ebus);
+}
+EXPORT_SYMBOL(auxio_set_led);
+
+static void __auxio_sbus_set_lte(int on)
+{
+	__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
+}
+
+void auxio_set_lte(int on)
+{
+	switch(auxio_devtype) {
+	case AUXIO_TYPE_SBUS:
+		__auxio_sbus_set_lte(on);
+		break;
+	case AUXIO_TYPE_EBUS:
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(auxio_set_lte);
+
+static const struct of_device_id auxio_match[] = {
+	{
+		.name = "auxio",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, auxio_match);
+
+static int auxio_probe(struct platform_device *dev)
+{
+	struct device_node *dp = dev->dev.of_node;
+	unsigned long size;
+
+	if (of_node_name_eq(dp->parent, "ebus")) {
+		auxio_devtype = AUXIO_TYPE_EBUS;
+		size = sizeof(u32);
+	} else if (of_node_name_eq(dp->parent, "sbus")) {
+		auxio_devtype = AUXIO_TYPE_SBUS;
+		size = 1;
+	} else {
+		printk("auxio: Unknown parent bus type [%pOFn]\n",
+		       dp->parent);
+		return -ENODEV;
+	}
+	auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
+	if (!auxio_register)
+		return -ENODEV;
+
+	printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp);
+
+	if (auxio_devtype == AUXIO_TYPE_EBUS)
+		auxio_set_led(AUXIO_LED_ON);
+
+	return 0;
+}
+
+static struct platform_driver auxio_driver = {
+	.probe = auxio_probe,
+	.driver = {
+		.name = "auxio",
+		.of_match_table = auxio_match,
+	},
+};
+
+static int __init auxio_init(void)
+{
+	return platform_driver_register(&auxio_driver);
+}
+
+/* Must be after subsys_initcall() so that busses are probed. Must
+ * be before device_initcall() because things like the floppy driver
+ * need to use the AUXIO register.
+ */ +fs_initcall(auxio_init); diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c new file mode 100644 index 000000000..e2d3f0d29 --- /dev/null +++ b/arch/sparc/kernel/btext.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Procedures for drawing on the screen early on in the boot process. + * + * Benjamin Herrenschmidt + */ +#include +#include +#include +#include + +#include +#include +#include + +#define NO_SCROLL + +#ifndef NO_SCROLL +static void scrollscreen(void); +#endif + +static void draw_byte(unsigned char c, long locX, long locY); +static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb); +static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); +static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); + +#define __force_data __section(".data") + +static int g_loc_X __force_data; +static int g_loc_Y __force_data; +static int g_max_loc_X __force_data; +static int g_max_loc_Y __force_data; + +static int dispDeviceRowBytes __force_data; +static int dispDeviceDepth __force_data; +static int dispDeviceRect[4] __force_data; +static unsigned char *dispDeviceBase __force_data; + +#define cmapsz (16*256) + +static unsigned char vga_font[cmapsz]; + +static int __init btext_initialize(phandle node) +{ + unsigned int width, height, depth, pitch; + unsigned long address = 0; + u32 prop; + + if (prom_getproperty(node, "width", (char *)&width, 4) < 0) + return -EINVAL; + if (prom_getproperty(node, "height", (char *)&height, 4) < 0) + return -EINVAL; + if (prom_getproperty(node, "depth", (char *)&depth, 4) < 0) + return -EINVAL; + pitch = width * ((depth + 7) / 8); + + if (prom_getproperty(node, "linebytes", (char *)&prop, 4) >= 0 && + prop != 0xffffffffu) + pitch = prop; + + if (pitch == 1) + pitch = 0x1000; + + if (prom_getproperty(node, "address", (char *)&prop, 4) >= 0) + address = prop; + + /* FIXME: Add support for PCI reg properties. Right now, only + * reliable on macs + */ + if (address == 0) + return -EINVAL; + + g_loc_X = 0; + g_loc_Y = 0; + g_max_loc_X = width / 8; + g_max_loc_Y = height / 16; + dispDeviceBase = (unsigned char *)address; + dispDeviceRowBytes = pitch; + dispDeviceDepth = depth == 15 ? 
16 : depth; + dispDeviceRect[0] = dispDeviceRect[1] = 0; + dispDeviceRect[2] = width; + dispDeviceRect[3] = height; + + return 0; +} + +/* Calc the base address of a given point (x,y) */ +static unsigned char * calc_base(int x, int y) +{ + unsigned char *base = dispDeviceBase; + + base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3); + base += (y + dispDeviceRect[1]) * dispDeviceRowBytes; + return base; +} + +static void btext_clearscreen(void) +{ + unsigned int *base = (unsigned int *)calc_base(0, 0); + unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * + (dispDeviceDepth >> 3)) >> 2; + int i,j; + + for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) + { + unsigned int *ptr = base; + for(j=width; j; --j) + *(ptr++) = 0; + base += (dispDeviceRowBytes >> 2); + } +} + +#ifndef NO_SCROLL +static void scrollscreen(void) +{ + unsigned int *src = (unsigned int *)calc_base(0,16); + unsigned int *dst = (unsigned int *)calc_base(0,0); + unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * + (dispDeviceDepth >> 3)) >> 2; + int i,j; + + for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) + { + unsigned int *src_ptr = src; + unsigned int *dst_ptr = dst; + for(j=width; j; --j) + *(dst_ptr++) = *(src_ptr++); + src += (dispDeviceRowBytes >> 2); + dst += (dispDeviceRowBytes >> 2); + } + for (i=0; i<16; i++) + { + unsigned int *dst_ptr = dst; + for(j=width; j; --j) + *(dst_ptr++) = 0; + dst += (dispDeviceRowBytes >> 2); + } +} +#endif /* ndef NO_SCROLL */ + +static void btext_drawchar(char c) +{ + int cline = 0; +#ifdef NO_SCROLL + int x; +#endif + switch (c) { + case '\b': + if (g_loc_X > 0) + --g_loc_X; + break; + case '\t': + g_loc_X = (g_loc_X & -8) + 8; + break; + case '\r': + g_loc_X = 0; + break; + case '\n': + g_loc_X = 0; + g_loc_Y++; + cline = 1; + break; + default: + draw_byte(c, g_loc_X++, g_loc_Y); + } + if (g_loc_X >= g_max_loc_X) { + g_loc_X = 0; + g_loc_Y++; + cline = 1; + } +#ifndef NO_SCROLL + while (g_loc_Y >= g_max_loc_Y) { + scrollscreen(); + g_loc_Y--; + } +#else + /* wrap around from bottom to top of screen so we don't + waste time scrolling each line. -- paulus. 
*/ + if (g_loc_Y >= g_max_loc_Y) + g_loc_Y = 0; + if (cline) { + for (x = 0; x < g_max_loc_X; ++x) + draw_byte(' ', x, g_loc_Y); + } +#endif +} + +static void btext_drawtext(const char *c, unsigned int len) +{ + while (len--) + btext_drawchar(*c++); +} + +static void draw_byte(unsigned char c, long locX, long locY) +{ + unsigned char *base = calc_base(locX << 3, locY << 4); + unsigned char *font = &vga_font[((unsigned int)c) * 16]; + int rb = dispDeviceRowBytes; + + switch(dispDeviceDepth) { + case 24: + case 32: + draw_byte_32(font, (unsigned int *)base, rb); + break; + case 15: + case 16: + draw_byte_16(font, (unsigned int *)base, rb); + break; + case 8: + draw_byte_8(font, (unsigned int *)base, rb); + break; + } +} + +static unsigned int expand_bits_8[16] = { + 0x00000000, + 0x000000ff, + 0x0000ff00, + 0x0000ffff, + 0x00ff0000, + 0x00ff00ff, + 0x00ffff00, + 0x00ffffff, + 0xff000000, + 0xff0000ff, + 0xff00ff00, + 0xff00ffff, + 0xffff0000, + 0xffff00ff, + 0xffffff00, + 0xffffffff +}; + +static unsigned int expand_bits_16[4] = { + 0x00000000, + 0x0000ffff, + 0xffff0000, + 0xffffffff +}; + + +static void draw_byte_32(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0xFFFFFFFFUL; + int bg = 0x00000000UL; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (-(bits >> 7) & fg) ^ bg; + base[1] = (-((bits >> 6) & 1) & fg) ^ bg; + base[2] = (-((bits >> 5) & 1) & fg) ^ bg; + base[3] = (-((bits >> 4) & 1) & fg) ^ bg; + base[4] = (-((bits >> 3) & 1) & fg) ^ bg; + base[5] = (-((bits >> 2) & 1) & fg) ^ bg; + base[6] = (-((bits >> 1) & 1) & fg) ^ bg; + base[7] = (-(bits & 1) & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void draw_byte_16(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0xFFFFFFFFUL; + int bg = 0x00000000UL; + unsigned int *eb = (int *)expand_bits_16; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (eb[bits >> 6] & fg) ^ bg; + base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; + base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; + base[3] = (eb[bits & 3] & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void draw_byte_8(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0x0F0F0F0FUL; + int bg = 0x00000000UL; + unsigned int *eb = (int *)expand_bits_8; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (eb[bits >> 4] & fg) ^ bg; + base[1] = (eb[bits & 0xf] & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void btext_console_write(struct console *con, const char *s, + unsigned int n) +{ + btext_drawtext(s, n); +} + +static struct console btext_console = { + .name = "btext", + .write = btext_console_write, + .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME, + .index = 0, +}; + +int __init btext_find_display(void) +{ + phandle node; + char type[32]; + int ret; + + node = prom_inst2pkg(prom_stdout); + if (prom_getproperty(node, "device_type", type, 32) < 0) + return -ENODEV; + if (strcmp(type, "display")) + return -ENODEV; + + ret = btext_initialize(node); + if (!ret) { + btext_clearscreen(); + register_console(&btext_console); + } + return ret; +} + +static unsigned char vga_font[cmapsz] = { +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd, +0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff, +0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 
0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe, +0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, +0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, +0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00, +0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd, +0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e, +0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30, +0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63, +0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8, +0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e, +0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, +0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb, +0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6, +0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, +0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, +0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c, +0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, +0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, +0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c, +0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18, +0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, +0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, +0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18, +0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, +0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe, +0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, +0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18, +0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, +0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, +0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, +0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde, +0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, +0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0, +0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c, +0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, +0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, +0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c, +0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0xf0, 0x60, 0x60, 0x60, 0x60, 0x60, +0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7, +0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, +0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c, +0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c, +0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, +0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18, +0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, +0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, +0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, +0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, +0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06, +0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb, +0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, +0x66, 0x66, 
0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60, +0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30, +0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, +0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18, +0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, +0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, +0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00, +0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe, +0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, +0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, +0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06, +0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe, +0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, +0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, +0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66, +0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, +0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00, +0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b, +0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c, +0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00, +0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 
0x30, 0x18, +0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, +0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00, +0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, +0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18, +0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, +0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00, +0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, +0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, +0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, +0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, +0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, +0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, +0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06, +0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, +0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, +0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36, +0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, +0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, +0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, +0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, +0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, +0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, +0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, +0x0f, 0x0f, 
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, +0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8, +0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66, +0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, +0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66, +0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60, +0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, +0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, +0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, +0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, +0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, +0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c, +0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00, +0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, +}; diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c new file mode 100644 index 000000000..23f8838dd --- /dev/null +++ b/arch/sparc/kernel/central.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* central.c: Central FHC 
driver for Sunfire/Starfire/Wildfire. + * + * Copyright (C) 1997, 1999, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct clock_board { + void __iomem *clock_freq_regs; + void __iomem *clock_regs; + void __iomem *clock_ver_reg; + int num_slots; + struct resource leds_resource; + struct platform_device leds_pdev; +}; + +struct fhc { + void __iomem *pregs; + bool central; + bool jtag_master; + int board_num; + struct resource leds_resource; + struct platform_device leds_pdev; +}; + +static int clock_board_calc_nslots(struct clock_board *p) +{ + u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0; + + switch (reg) { + case 0x40: + return 16; + + case 0xc0: + return 8; + + case 0x80: + reg = 0; + if (p->clock_ver_reg) + reg = upa_readb(p->clock_ver_reg); + if (reg) { + if (reg & 0x80) + return 4; + else + return 5; + } + fallthrough; + default: + return 4; + } +} + +static int clock_board_probe(struct platform_device *op) +{ + struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); + int err = -ENOMEM; + + if (!p) { + printk(KERN_ERR "clock_board: Cannot allocate struct clock_board\n"); + goto out; + } + + p->clock_freq_regs = of_ioremap(&op->resource[0], 0, + resource_size(&op->resource[0]), + "clock_board_freq"); + if (!p->clock_freq_regs) { + printk(KERN_ERR "clock_board: Cannot map clock_freq_regs\n"); + goto out_free; + } + + p->clock_regs = of_ioremap(&op->resource[1], 0, + resource_size(&op->resource[1]), + "clock_board_regs"); + if (!p->clock_regs) { + printk(KERN_ERR "clock_board: Cannot map clock_regs\n"); + goto out_unmap_clock_freq_regs; + } + + if (op->resource[2].flags) { + p->clock_ver_reg = of_ioremap(&op->resource[2], 0, + resource_size(&op->resource[2]), + "clock_ver_reg"); + if (!p->clock_ver_reg) { + printk(KERN_ERR "clock_board: Cannot map clock_ver_reg\n"); + goto out_unmap_clock_regs; + } + } + + p->num_slots = clock_board_calc_nslots(p); + + p->leds_resource.start = (unsigned long) + (p->clock_regs + CLOCK_CTRL); + p->leds_resource.end = p->leds_resource.start; + p->leds_resource.name = "leds"; + + p->leds_pdev.name = "sunfire-clockboard-leds"; + p->leds_pdev.id = -1; + p->leds_pdev.resource = &p->leds_resource; + p->leds_pdev.num_resources = 1; + p->leds_pdev.dev.parent = &op->dev; + + err = platform_device_register(&p->leds_pdev); + if (err) { + printk(KERN_ERR "clock_board: Could not register LEDS " + "platform device\n"); + goto out_unmap_clock_ver_reg; + } + + printk(KERN_INFO "clock_board: Detected %d slot Enterprise system.\n", + p->num_slots); + + err = 0; +out: + return err; + +out_unmap_clock_ver_reg: + if (p->clock_ver_reg) + of_iounmap(&op->resource[2], p->clock_ver_reg, + resource_size(&op->resource[2])); + +out_unmap_clock_regs: + of_iounmap(&op->resource[1], p->clock_regs, + resource_size(&op->resource[1])); + +out_unmap_clock_freq_regs: + of_iounmap(&op->resource[0], p->clock_freq_regs, + resource_size(&op->resource[0])); + +out_free: + kfree(p); + goto out; +} + +static const struct of_device_id clock_board_match[] = { + { + .name = "clock-board", + }, + {}, +}; + +static struct platform_driver clock_board_driver = { + .probe = clock_board_probe, + .driver = { + .name = "clock_board", + .of_match_table = clock_board_match, + }, +}; + +static int fhc_probe(struct platform_device *op) +{ + struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); + int err = -ENOMEM; + u32 reg; + + if (!p) { + printk(KERN_ERR "fhc: Cannot allocate struct fhc\n"); + 
goto out; + } + + if (of_node_name_eq(op->dev.of_node->parent, "central")) + p->central = true; + + p->pregs = of_ioremap(&op->resource[0], 0, + resource_size(&op->resource[0]), + "fhc_pregs"); + if (!p->pregs) { + printk(KERN_ERR "fhc: Cannot map pregs\n"); + goto out_free; + } + + if (p->central) { + reg = upa_readl(p->pregs + FHC_PREGS_BSR); + p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e); + } else { + p->board_num = of_getintprop_default(op->dev.of_node, "board#", -1); + if (p->board_num == -1) { + printk(KERN_ERR "fhc: No board# property\n"); + goto out_unmap_pregs; + } + if (upa_readl(p->pregs + FHC_PREGS_JCTRL) & FHC_JTAG_CTRL_MENAB) + p->jtag_master = true; + } + + if (!p->central) { + p->leds_resource.start = (unsigned long) + (p->pregs + FHC_PREGS_CTRL); + p->leds_resource.end = p->leds_resource.start; + p->leds_resource.name = "leds"; + + p->leds_pdev.name = "sunfire-fhc-leds"; + p->leds_pdev.id = p->board_num; + p->leds_pdev.resource = &p->leds_resource; + p->leds_pdev.num_resources = 1; + p->leds_pdev.dev.parent = &op->dev; + + err = platform_device_register(&p->leds_pdev); + if (err) { + printk(KERN_ERR "fhc: Could not register LEDS " + "platform device\n"); + goto out_unmap_pregs; + } + } + reg = upa_readl(p->pregs + FHC_PREGS_CTRL); + + if (!p->central) + reg |= FHC_CONTROL_IXIST; + + reg &= ~(FHC_CONTROL_AOFF | + FHC_CONTROL_BOFF | + FHC_CONTROL_SLINE); + + upa_writel(reg, p->pregs + FHC_PREGS_CTRL); + upa_readl(p->pregs + FHC_PREGS_CTRL); + + reg = upa_readl(p->pregs + FHC_PREGS_ID); + printk(KERN_INFO "fhc: Board #%d, Version[%x] PartID[%x] Manuf[%x] %s\n", + p->board_num, + (reg & FHC_ID_VERS) >> 28, + (reg & FHC_ID_PARTID) >> 12, + (reg & FHC_ID_MANUF) >> 1, + (p->jtag_master ? + "(JTAG Master)" : + (p->central ? "(Central)" : ""))); + + err = 0; + +out: + return err; + +out_unmap_pregs: + of_iounmap(&op->resource[0], p->pregs, resource_size(&op->resource[0])); + +out_free: + kfree(p); + goto out; +} + +static const struct of_device_id fhc_match[] = { + { + .name = "fhc", + }, + {}, +}; + +static struct platform_driver fhc_driver = { + .probe = fhc_probe, + .driver = { + .name = "fhc", + .of_match_table = fhc_match, + }, +}; + +static int __init sunfire_init(void) +{ + (void) platform_driver_register(&fhc_driver); + (void) platform_driver_register(&clock_board_driver); + return 0; +} + +fs_initcall(sunfire_init); diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S new file mode 100644 index 000000000..7f3d3d264 --- /dev/null +++ b/arch/sparc/kernel/cherrs.S @@ -0,0 +1,576 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* These get patched into the trap table at boot time + * once we know we have a cheetah processor. 
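+ *
+ * As a rough sketch of that boot-time step (the helper names here are
+ * illustrative, not the actual boot code): each vector below is exactly
+ * eight instructions, the size of one trap table entry, so installing
+ * one amounts to:
+ *
+ *	extern unsigned int cheetah_fecc_trap_vector[8];
+ *	unsigned int *slot = trap_table_slot(0x70);	! TT 0x70, fast ECC
+ *	memcpy(slot, cheetah_fecc_trap_vector, 8 * sizeof(unsigned int));
+ *	flushi(slot);	! keep the I-cache coherent with the new entry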
+ */ + .globl cheetah_fecc_trap_vector + .type cheetah_fecc_trap_vector,#function +cheetah_fecc_trap_vector: + membar #Sync + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 + andn %g1, DCU_DC | DCU_IC, %g1 + stxa %g1, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + sethi %hi(cheetah_fast_ecc), %g2 + jmpl %g2 + %lo(cheetah_fast_ecc), %g0 + mov 0, %g1 + .size cheetah_fecc_trap_vector,.-cheetah_fecc_trap_vector + + .globl cheetah_fecc_trap_vector_tl1 + .type cheetah_fecc_trap_vector_tl1,#function +cheetah_fecc_trap_vector_tl1: + membar #Sync + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 + andn %g1, DCU_DC | DCU_IC, %g1 + stxa %g1, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + sethi %hi(cheetah_fast_ecc), %g2 + jmpl %g2 + %lo(cheetah_fast_ecc), %g0 + mov 1, %g1 + .size cheetah_fecc_trap_vector_tl1,.-cheetah_fecc_trap_vector_tl1 + + .globl cheetah_cee_trap_vector + .type cheetah_cee_trap_vector,#function +cheetah_cee_trap_vector: + membar #Sync + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 + andn %g1, DCU_IC, %g1 + stxa %g1, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + sethi %hi(cheetah_cee), %g2 + jmpl %g2 + %lo(cheetah_cee), %g0 + mov 0, %g1 + .size cheetah_cee_trap_vector,.-cheetah_cee_trap_vector + + .globl cheetah_cee_trap_vector_tl1 + .type cheetah_cee_trap_vector_tl1,#function +cheetah_cee_trap_vector_tl1: + membar #Sync + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 + andn %g1, DCU_IC, %g1 + stxa %g1, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + sethi %hi(cheetah_cee), %g2 + jmpl %g2 + %lo(cheetah_cee), %g0 + mov 1, %g1 + .size cheetah_cee_trap_vector_tl1,.-cheetah_cee_trap_vector_tl1 + + .globl cheetah_deferred_trap_vector + .type cheetah_deferred_trap_vector,#function +cheetah_deferred_trap_vector: + membar #Sync + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; + andn %g1, DCU_DC | DCU_IC, %g1; + stxa %g1, [%g0] ASI_DCU_CONTROL_REG; + membar #Sync; + sethi %hi(cheetah_deferred_trap), %g2 + jmpl %g2 + %lo(cheetah_deferred_trap), %g0 + mov 0, %g1 + .size cheetah_deferred_trap_vector,.-cheetah_deferred_trap_vector + + .globl cheetah_deferred_trap_vector_tl1 + .type cheetah_deferred_trap_vector_tl1,#function +cheetah_deferred_trap_vector_tl1: + membar #Sync; + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; + andn %g1, DCU_DC | DCU_IC, %g1; + stxa %g1, [%g0] ASI_DCU_CONTROL_REG; + membar #Sync; + sethi %hi(cheetah_deferred_trap), %g2 + jmpl %g2 + %lo(cheetah_deferred_trap), %g0 + mov 1, %g1 + .size cheetah_deferred_trap_vector_tl1,.-cheetah_deferred_trap_vector_tl1 + + /* Cheetah+ specific traps. These are for the new I/D cache parity + * error traps. 
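+ * (The handler named below is the C function cheetah_plus_parity_error();
+ * the stubs that follow build its arguments before branching to it.)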
The first argument to cheetah_plus_parity_handler + * is encoded as follows: + * + * Bit0: 0=dcache,1=icache + * Bit1: 0=recoverable,1=unrecoverable + */ + .globl cheetah_plus_dcpe_trap_vector + .type cheetah_plus_dcpe_trap_vector,#function +cheetah_plus_dcpe_trap_vector: + membar #Sync + sethi %hi(do_cheetah_plus_data_parity), %g7 + jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0 + nop + nop + nop + nop + nop + .size cheetah_plus_dcpe_trap_vector,.-cheetah_plus_dcpe_trap_vector + + .type do_cheetah_plus_data_parity,#function +do_cheetah_plus_data_parity: + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov 0x0, %o0 + call cheetah_plus_parity_error + add %sp, PTREGS_OFF, %o1 + ba,a,pt %xcc, rtrap_irq + .size do_cheetah_plus_data_parity,.-do_cheetah_plus_data_parity + + .globl cheetah_plus_dcpe_trap_vector_tl1 + .type cheetah_plus_dcpe_trap_vector_tl1,#function +cheetah_plus_dcpe_trap_vector_tl1: + membar #Sync + wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate + sethi %hi(do_dcpe_tl1), %g3 + jmpl %g3 + %lo(do_dcpe_tl1), %g0 + nop + nop + nop + nop + .size cheetah_plus_dcpe_trap_vector_tl1,.-cheetah_plus_dcpe_trap_vector_tl1 + + .globl cheetah_plus_icpe_trap_vector + .type cheetah_plus_icpe_trap_vector,#function +cheetah_plus_icpe_trap_vector: + membar #Sync + sethi %hi(do_cheetah_plus_insn_parity), %g7 + jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0 + nop + nop + nop + nop + nop + .size cheetah_plus_icpe_trap_vector,.-cheetah_plus_icpe_trap_vector + + .type do_cheetah_plus_insn_parity,#function +do_cheetah_plus_insn_parity: + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov 0x1, %o0 + call cheetah_plus_parity_error + add %sp, PTREGS_OFF, %o1 + ba,a,pt %xcc, rtrap_irq + .size do_cheetah_plus_insn_parity,.-do_cheetah_plus_insn_parity + + .globl cheetah_plus_icpe_trap_vector_tl1 + .type cheetah_plus_icpe_trap_vector_tl1,#function +cheetah_plus_icpe_trap_vector_tl1: + membar #Sync + wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate + sethi %hi(do_icpe_tl1), %g3 + jmpl %g3 + %lo(do_icpe_tl1), %g0 + nop + nop + nop + nop + .size cheetah_plus_icpe_trap_vector_tl1,.-cheetah_plus_icpe_trap_vector_tl1 + + /* If we take one of these traps when tl >= 1, then we + * jump to interrupt globals. If some trap level above us + * was also using interrupt globals, we cannot recover. + * We may use all interrupt global registers except %g6. + */ + .globl do_dcpe_tl1 + .type do_dcpe_tl1,#function +do_dcpe_tl1: + rdpr %tl, %g1 ! Save original trap level + mov 1, %g2 ! Setup TSTATE checking loop + sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit +1: wrpr %g2, %tl ! Set trap level to check + rdpr %tstate, %g4 ! Read TSTATE for this level + andcc %g4, %g3, %g0 ! Interrupt globals in use? + bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable + wrpr %g1, %tl ! Restore original trap level + add %g2, 1, %g2 ! Next trap level + cmp %g2, %g1 ! Hit them all yet? + ble,pt %icc, 1b ! Not yet + nop + wrpr %g1, %tl ! Restore original trap level +do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ + sethi %hi(dcache_parity_tl1_occurred), %g2 + lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1 + add %g1, 1, %g1 + stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)] + /* Reset D-cache parity */ + sethi %hi(1 << 16), %g1 ! D-cache size + mov (1 << 5), %g2 ! 
D-cache line size + sub %g1, %g2, %g1 ! Move down 1 cacheline +1: srl %g1, 14, %g3 ! Compute UTAG + membar #Sync + stxa %g3, [%g1] ASI_DCACHE_UTAG + membar #Sync + sub %g2, 8, %g3 ! 64-bit data word within line +2: membar #Sync + stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA + membar #Sync + subcc %g3, 8, %g3 ! Next 64-bit data word + bge,pt %icc, 2b + nop + subcc %g1, %g2, %g1 ! Next cacheline + bge,pt %icc, 1b + nop + ba,a,pt %xcc, dcpe_icpe_tl1_common + +do_dcpe_tl1_fatal: + sethi %hi(1f), %g7 + ba,pt %xcc, etraptl1 +1: or %g7, %lo(1b), %g7 + mov 0x2, %o0 + call cheetah_plus_parity_error + add %sp, PTREGS_OFF, %o1 + ba,a,pt %xcc, rtrap + .size do_dcpe_tl1,.-do_dcpe_tl1 + + .globl do_icpe_tl1 + .type do_icpe_tl1,#function +do_icpe_tl1: + rdpr %tl, %g1 ! Save original trap level + mov 1, %g2 ! Setup TSTATE checking loop + sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit +1: wrpr %g2, %tl ! Set trap level to check + rdpr %tstate, %g4 ! Read TSTATE for this level + andcc %g4, %g3, %g0 ! Interrupt globals in use? + bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable + wrpr %g1, %tl ! Restore original trap level + add %g2, 1, %g2 ! Next trap level + cmp %g2, %g1 ! Hit them all yet? + ble,pt %icc, 1b ! Not yet + nop + wrpr %g1, %tl ! Restore original trap level +do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ + sethi %hi(icache_parity_tl1_occurred), %g2 + lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1 + add %g1, 1, %g1 + stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)] + /* Flush I-cache */ + sethi %hi(1 << 15), %g1 ! I-cache size + mov (1 << 5), %g2 ! I-cache line size + sub %g1, %g2, %g1 +1: or %g1, (2 << 3), %g3 + stxa %g0, [%g3] ASI_IC_TAG + membar #Sync + subcc %g1, %g2, %g1 + bge,pt %icc, 1b + nop + ba,a,pt %xcc, dcpe_icpe_tl1_common + +do_icpe_tl1_fatal: + sethi %hi(1f), %g7 + ba,pt %xcc, etraptl1 +1: or %g7, %lo(1b), %g7 + mov 0x3, %o0 + call cheetah_plus_parity_error + add %sp, PTREGS_OFF, %o1 + ba,a,pt %xcc, rtrap + .size do_icpe_tl1,.-do_icpe_tl1 + + .type dcpe_icpe_tl1_common,#function +dcpe_icpe_tl1_common: + /* Flush D-cache, re-enable D/I caches in DCU and finally + * retry the trapping instruction. + */ + sethi %hi(1 << 16), %g1 ! D-cache size + mov (1 << 5), %g2 ! D-cache line size + sub %g1, %g2, %g1 +1: stxa %g0, [%g1] ASI_DCACHE_TAG + membar #Sync + subcc %g1, %g2, %g1 + bge,pt %icc, 1b + nop + ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 + or %g1, (DCU_DC | DCU_IC), %g1 + stxa %g1, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + retry + .size dcpe_icpe_tl1_common,.-dcpe_icpe_tl1_common + + /* Capture I/D/E-cache state into per-cpu error scoreboard. + * + * %g1: (TL>=0) ? 1 : 0 + * %g2: scratch + * %g3: scratch + * %g4: AFSR + * %g5: AFAR + * %g6: unused, will have current thread ptr after etrap + * %g7: scratch + */ + .type __cheetah_log_error,#function +__cheetah_log_error: + /* Put "TL1" software bit into AFSR. */ + and %g1, 0x1, %g1 + sllx %g1, 63, %g2 + or %g4, %g2, %g4 + + /* Get log entry pointer for this cpu at this trap level. 
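+ * In rough C terms (descriptive names only), the code below computes
+ *
+ *	entry = cheetah_error_log + (cpuid * 512) + (tl1 ? 256 : 0);
+ *
+ * i.e. each cpu owns two 256-byte scoreboard slots, one per trap
+ * level, with the cpuid read from the Safari (or JBUS) config register.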
*/ + BRANCH_IF_JALAPENO(g2,g3,50f) + ldxa [%g0] ASI_SAFARI_CONFIG, %g2 + srlx %g2, 17, %g2 + ba,pt %xcc, 60f + and %g2, 0x3ff, %g2 + +50: ldxa [%g0] ASI_JBUS_CONFIG, %g2 + srlx %g2, 17, %g2 + and %g2, 0x1f, %g2 + +60: sllx %g2, 9, %g2 + sethi %hi(cheetah_error_log), %g3 + ldx [%g3 + %lo(cheetah_error_log)], %g3 + brz,pn %g3, 80f + nop + + add %g3, %g2, %g3 + sllx %g1, 8, %g1 + add %g3, %g1, %g1 + + /* %g1 holds pointer to the top of the logging scoreboard */ + ldx [%g1 + 0x0], %g7 + cmp %g7, -1 + bne,pn %xcc, 80f + nop + + stx %g4, [%g1 + 0x0] + stx %g5, [%g1 + 0x8] + add %g1, 0x10, %g1 + + /* %g1 now points to D-cache logging area */ + set 0x3ff8, %g2 /* DC_addr mask */ + and %g5, %g2, %g2 /* DC_addr bits of AFAR */ + srlx %g5, 12, %g3 + or %g3, 1, %g3 /* PHYS tag + valid */ + +10: ldxa [%g2] ASI_DCACHE_TAG, %g7 + cmp %g3, %g7 /* TAG match? */ + bne,pt %xcc, 13f + nop + + /* Yep, what we want, capture state. */ + stx %g2, [%g1 + 0x20] + stx %g7, [%g1 + 0x28] + + /* A membar Sync is required before and after utag access. */ + membar #Sync + ldxa [%g2] ASI_DCACHE_UTAG, %g7 + membar #Sync + stx %g7, [%g1 + 0x30] + ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7 + stx %g7, [%g1 + 0x38] + clr %g3 + +12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7 + stx %g7, [%g1] + add %g3, (1 << 5), %g3 + cmp %g3, (4 << 5) + bl,pt %xcc, 12b + add %g1, 0x8, %g1 + + ba,pt %xcc, 20f + add %g1, 0x20, %g1 + +13: sethi %hi(1 << 14), %g7 + add %g2, %g7, %g2 + srlx %g2, 14, %g7 + cmp %g7, 4 + bl,pt %xcc, 10b + nop + + add %g1, 0x40, %g1 + + /* %g1 now points to I-cache logging area */ +20: set 0x1fe0, %g2 /* IC_addr mask */ + and %g5, %g2, %g2 /* IC_addr bits of AFAR */ + sllx %g2, 1, %g2 /* IC_addr[13:6]==VA[12:5] */ + srlx %g5, (13 - 8), %g3 /* Make PTAG */ + andn %g3, 0xff, %g3 /* Mask off undefined bits */ + +21: ldxa [%g2] ASI_IC_TAG, %g7 + andn %g7, 0xff, %g7 + cmp %g3, %g7 + bne,pt %xcc, 23f + nop + + /* Yep, what we want, capture state. */ + stx %g2, [%g1 + 0x40] + stx %g7, [%g1 + 0x48] + add %g2, (1 << 3), %g2 + ldxa [%g2] ASI_IC_TAG, %g7 + add %g2, (1 << 3), %g2 + stx %g7, [%g1 + 0x50] + ldxa [%g2] ASI_IC_TAG, %g7 + add %g2, (1 << 3), %g2 + stx %g7, [%g1 + 0x60] + ldxa [%g2] ASI_IC_TAG, %g7 + stx %g7, [%g1 + 0x68] + sub %g2, (3 << 3), %g2 + ldxa [%g2] ASI_IC_STAG, %g7 + stx %g7, [%g1 + 0x58] + clr %g3 + srlx %g2, 2, %g2 + +22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7 + stx %g7, [%g1] + add %g3, (1 << 3), %g3 + cmp %g3, (8 << 3) + bl,pt %xcc, 22b + add %g1, 0x8, %g1 + + ba,pt %xcc, 30f + add %g1, 0x30, %g1 + +23: sethi %hi(1 << 14), %g7 + add %g2, %g7, %g2 + srlx %g2, 14, %g7 + cmp %g7, 4 + bl,pt %xcc, 21b + nop + + add %g1, 0x70, %g1 + + /* %g1 now points to E-cache logging area */ +30: andn %g5, (32 - 1), %g2 + stx %g2, [%g1 + 0x20] + ldxa [%g2] ASI_EC_TAG_DATA, %g7 + stx %g7, [%g1 + 0x28] + ldxa [%g2] ASI_EC_R, %g0 + clr %g3 + +31: ldxa [%g3] ASI_EC_DATA, %g7 + stx %g7, [%g1 + %g3] + add %g3, 0x8, %g3 + cmp %g3, 0x20 + + bl,pt %xcc, 31b + nop +80: + rdpr %tt, %g2 + cmp %g2, 0x70 + be c_fast_ecc + cmp %g2, 0x63 + be c_cee + nop + ba,a,pt %xcc, c_deferred + .size __cheetah_log_error,.-__cheetah_log_error + + /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc + * in the trap table. That code has done a memory barrier + * and has disabled both the I-cache and D-cache in the DCU + * control register. The I-cache is disabled so that we may + * capture the corrupted cache line, and the D-cache is disabled + * because corrupt data may have been placed there and we don't + * want to reference it. 
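+ * Note that the AFSR error bits are write-one-to-clear; that is why
+ * each handler below stores the just-read AFSR value straight back
+ * after capturing it along with the AFAR.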
+ * + * %g1 is one if this trap occurred at %tl >= 1. + * + * Next, we turn off error reporting so that we don't recurse. + */ + .globl cheetah_fast_ecc + .type cheetah_fast_ecc,#function +cheetah_fast_ecc: + ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 + andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 + stxa %g2, [%g0] ASI_ESTATE_ERROR_EN + membar #Sync + + /* Fetch and clear AFSR/AFAR */ + ldxa [%g0] ASI_AFSR, %g4 + ldxa [%g0] ASI_AFAR, %g5 + stxa %g4, [%g0] ASI_AFSR + membar #Sync + + ba,pt %xcc, __cheetah_log_error + nop + .size cheetah_fast_ecc,.-cheetah_fast_ecc + + .type c_fast_ecc,#function +c_fast_ecc: + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov %l4, %o1 + mov %l5, %o2 + call cheetah_fecc_handler + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_irq + .size c_fast_ecc,.-c_fast_ecc + + /* Our caller has disabled I-cache and performed membar Sync. */ + .globl cheetah_cee + .type cheetah_cee,#function +cheetah_cee: + ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 + andn %g2, ESTATE_ERROR_CEEN, %g2 + stxa %g2, [%g0] ASI_ESTATE_ERROR_EN + membar #Sync + + /* Fetch and clear AFSR/AFAR */ + ldxa [%g0] ASI_AFSR, %g4 + ldxa [%g0] ASI_AFAR, %g5 + stxa %g4, [%g0] ASI_AFSR + membar #Sync + + ba,pt %xcc, __cheetah_log_error + nop + .size cheetah_cee,.-cheetah_cee + + .type c_cee,#function +c_cee: + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov %l4, %o1 + mov %l5, %o2 + call cheetah_cee_handler + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_irq + .size c_cee,.-c_cee + + /* Our caller has disabled I-cache+D-cache and performed membar Sync. */ + .globl cheetah_deferred_trap + .type cheetah_deferred_trap,#function +cheetah_deferred_trap: + ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 + andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 + stxa %g2, [%g0] ASI_ESTATE_ERROR_EN + membar #Sync + + /* Fetch and clear AFSR/AFAR */ + ldxa [%g0] ASI_AFSR, %g4 + ldxa [%g0] ASI_AFAR, %g5 + stxa %g4, [%g0] ASI_AFSR + membar #Sync + + ba,pt %xcc, __cheetah_log_error + nop + .size cheetah_deferred_trap,.-cheetah_deferred_trap + + .type c_deferred,#function +c_deferred: + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov %l4, %o1 + mov %l5, %o2 + call cheetah_deferred_handler + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_irq + .size c_deferred,.-c_deferred diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c new file mode 100644 index 000000000..6ff43df74 --- /dev/null +++ b/arch/sparc/kernel/chmc.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* chmc.c: Driver for UltraSPARC-III memory controller. + * + * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_MODULE_NAME "chmc" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.2" + +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("UltraSPARC-III memory controller driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int mc_type; +#define MC_TYPE_SAFARI 1 +#define MC_TYPE_JBUS 2 + +static dimm_printer_t us3mc_dimm_printer; + +#define CHMCTRL_NDGRPS 2 +#define CHMCTRL_NDIMMS 4 + +#define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS) + +/* OBP memory-layout property format. */ +struct chmc_obp_map { + unsigned char dimm_map[144]; + unsigned char pin_map[576]; +}; + +#define DIMM_LABEL_SZ 8 + +struct chmc_obp_mem_layout { + /* One max 8-byte string label per DIMM. Usually + * this matches the label on the motherboard where + * that DIMM resides. + */ + char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ]; + + /* If symmetric use map[0], else it is + * asymmetric and map[1] should be used. + */ + char symmetric; + + struct chmc_obp_map map[2]; +}; + +#define CHMCTRL_NBANKS 4 + +struct chmc_bank_info { + struct chmc *p; + int bank_id; + + u64 raw_reg; + int valid; + int uk; + int um; + int lk; + int lm; + int interleave; + unsigned long base; + unsigned long size; +}; + +struct chmc { + struct list_head list; + int portid; + + struct chmc_obp_mem_layout layout_prop; + int layout_size; + + void __iomem *regs; + + u64 timing_control1; + u64 timing_control2; + u64 timing_control3; + u64 timing_control4; + u64 memaddr_control; + + struct chmc_bank_info logical_banks[CHMCTRL_NBANKS]; +}; + +#define JBUSMC_REGS_SIZE 8 + +#define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL +#define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL +#define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL +#define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL +#define JB_MC_REG1_XOR 0x0000010000000000UL +#define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL +#define JB_MC_REG1_ADDR_GEN_2_SHIFT 37 +#define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL +#define JB_MC_REG1_ADDR_GEN_1_SHIFT 34 +#define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL +#define JB_MC_REG1_INTERLEAVE_SHIFT 23 +#define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL +#define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21 +#define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL +#define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20 + +#define PART_TYPE_X8 0 +#define PART_TYPE_X4 1 + +#define INTERLEAVE_NONE 0 +#define INTERLEAVE_SAME 1 +#define INTERLEAVE_INTERNAL 2 +#define INTERLEAVE_BOTH 3 + +#define ADDR_GEN_128MB 0 +#define ADDR_GEN_256MB 1 +#define ADDR_GEN_512MB 2 +#define ADDR_GEN_1GB 3 + +#define JB_NUM_DIMM_GROUPS 2 +#define JB_NUM_DIMMS_PER_GROUP 2 +#define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP) + +struct jbusmc_obp_map { + unsigned char dimm_map[18]; + unsigned char pin_map[144]; +}; + +struct jbusmc_obp_mem_layout { + /* One max 8-byte string label per DIMM. Usually + * this matches the label on the motherboard where + * that DIMM resides. + */ + char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ]; + + /* If symmetric use map[0], else it is + * asymmetric and map[1] should be used. 
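+ * (Unlike the Safari layout above, this JBUS variant carries only a
+ * single map, which get_pin_and_dimm_str() uses in either case.)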
+ */ + char symmetric; + + struct jbusmc_obp_map map; + + char _pad; +}; + +struct jbusmc_dimm_group { + struct jbusmc *controller; + int index; + u64 base_addr; + u64 size; +}; + +struct jbusmc { + void __iomem *regs; + u64 mc_reg_1; + u32 portid; + struct jbusmc_obp_mem_layout layout; + int layout_len; + int num_dimm_groups; + struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS]; + struct list_head list; +}; + +static DEFINE_SPINLOCK(mctrl_list_lock); +static LIST_HEAD(mctrl_list); + +static void mc_list_add(struct list_head *list) +{ + spin_lock(&mctrl_list_lock); + list_add(list, &mctrl_list); + spin_unlock(&mctrl_list_lock); +} + +static void mc_list_del(struct list_head *list) +{ + spin_lock(&mctrl_list_lock); + list_del_init(list); + spin_unlock(&mctrl_list_lock); +} + +#define SYNDROME_MIN -1 +#define SYNDROME_MAX 144 + +/* Convert a syndrome code into the way the bits are positioned + * on the bus. + */ +static int syndrome_to_qword_code(int syndrome_code) +{ + if (syndrome_code < 128) + syndrome_code += 16; + else if (syndrome_code < 128 + 9) + syndrome_code -= (128 - 7); + else if (syndrome_code < (128 + 9 + 3)) + syndrome_code -= (128 + 9 - 4); + else + syndrome_code -= (128 + 9 + 3); + return syndrome_code; +} + +/* All this magic has to do with how a cache line comes over the wire + * on Safari and JBUS. A 64-byte line comes over in 1 or more quadword + * cycles, each of which transmits ECC/MTAG info as well as the actual + * data. + */ +#define L2_LINE_SIZE 64 +#define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1) +#define QW_PER_LINE 4 +#define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE) +#define QW_BITS 144 +#define SAFARI_LAST_BIT (576 - 1) +#define JBUS_LAST_BIT (144 - 1) + +static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr, + int *pin_p, char **dimm_str_p, void *_prop, + int base_dimm_offset) +{ + int qword_code = syndrome_to_qword_code(syndrome_code); + int cache_line_offset; + int offset_inverse; + int dimm_map_index; + int map_val; + + if (mc_type == MC_TYPE_JBUS) { + struct jbusmc_obp_mem_layout *p = _prop; + + /* JBUS */ + cache_line_offset = qword_code; + offset_inverse = (JBUS_LAST_BIT - cache_line_offset); + dimm_map_index = offset_inverse / 8; + map_val = p->map.dimm_map[dimm_map_index]; + map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1); + *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; + *pin_p = p->map.pin_map[cache_line_offset]; + } else { + struct chmc_obp_mem_layout *p = _prop; + struct chmc_obp_map *mp; + int qword; + + /* Safari */ + if (p->symmetric) + mp = &p->map[0]; + else + mp = &p->map[1]; + + qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES; + cache_line_offset = ((3 - qword) * QW_BITS) + qword_code; + offset_inverse = (SAFARI_LAST_BIT - cache_line_offset); + dimm_map_index = offset_inverse >> 2; + map_val = mp->dimm_map[dimm_map_index]; + map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3); + *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; + *pin_p = mp->pin_map[cache_line_offset]; + } +} + +static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) +{ + struct jbusmc *p; + + list_for_each_entry(p, &mctrl_list, list) { + int i; + + for (i = 0; i < p->num_dimm_groups; i++) { + struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; + + if (phys_addr < dp->base_addr || + (dp->base_addr + dp->size) <= phys_addr) + continue; + + return dp; + } + } + return NULL; +} + +static int jbusmc_print_dimm(int syndrome_code, + unsigned long phys_addr, + char *buf, int buflen) +{ + struct
jbusmc_obp_mem_layout *prop; + struct jbusmc_dimm_group *dp; + struct jbusmc *p; + int first_dimm; + + dp = jbusmc_find_dimm_group(phys_addr); + if (dp == NULL || + syndrome_code < SYNDROME_MIN || + syndrome_code > SYNDROME_MAX) { + buf[0] = '?'; + buf[1] = '?'; + buf[2] = '?'; + buf[3] = '\0'; + return 0; + } + p = dp->controller; + prop = &p->layout; + + first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; + + if (syndrome_code != SYNDROME_MIN) { + char *dimm_str; + int pin; + + get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, + &dimm_str, prop, first_dimm); + sprintf(buf, "%s, pin %3d", dimm_str, pin); + } else { + int dimm; + + /* Multi-bit error, we just dump out all the + * dimm labels associated with this dimm group. + */ + for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { + sprintf(buf, "%s ", + prop->dimm_labels[first_dimm + dimm]); + buf += strlen(buf); + } + } + + return 0; +} + +static u64 jbusmc_dimm_group_size(u64 base, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) +{ + u64 max = base + (8UL * 1024 * 1024 * 1024); + u64 max_seen = base; + int i; + + for (i = 0; i < num_mem_regs; i++) { + const struct linux_prom64_registers *ent; + u64 this_base; + u64 this_end; + + ent = &mem_regs[i]; + this_base = ent->phys_addr; + this_end = this_base + ent->reg_size; + if (base < this_base || base >= this_end) + continue; + if (this_end > max) + this_end = max; + if (this_end > max_seen) + max_seen = this_end; + } + + return max_seen - base; +} + +static void jbusmc_construct_one_dimm_group(struct jbusmc *p, + unsigned long index, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) +{ + struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; + + dp->controller = p; + dp->index = index; + + dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); + dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); + dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); +} + +static void jbusmc_construct_dimm_groups(struct jbusmc *p, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) +{ + if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { + jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs); + p->num_dimm_groups++; + } + if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { + jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); + p->num_dimm_groups++; + } +} + +static int jbusmc_probe(struct platform_device *op) +{ + const struct linux_prom64_registers *mem_regs; + struct device_node *mem_node; + int err, len, num_mem_regs; + struct jbusmc *p; + const u32 *prop; + const void *ml; + + err = -ENODEV; + mem_node = of_find_node_by_path("/memory"); + if (!mem_node) { + printk(KERN_ERR PFX "Cannot find /memory node.\n"); + goto out; + } + mem_regs = of_get_property(mem_node, "reg", &len); + if (!mem_regs) { + printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n"); + goto out; + } + num_mem_regs = len / sizeof(*mem_regs); + + err = -ENOMEM; + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n"); + goto out; + } + + INIT_LIST_HEAD(&p->list); + + err = -ENODEV; + prop = of_get_property(op->dev.of_node, "portid", &len); + if (!prop || len != 4) { + printk(KERN_ERR PFX "Cannot find portid.\n"); + goto out_free; + } + + p->portid = *prop; + + prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len); + if (!prop || len != 8) { + printk(KERN_ERR PFX "Cannot get memory control register 1.\n"); + goto out_free; + } + + p->mc_reg_1 = ((u64)prop[0] << 
32) | (u64) prop[1]; + + err = -ENOMEM; + p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc"); + if (!p->regs) { + printk(KERN_ERR PFX "Cannot map jbusmc regs.\n"); + goto out_free; + } + + err = -ENODEV; + ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len); + if (!ml) { + printk(KERN_ERR PFX "Cannot get memory layout property.\n"); + goto out_iounmap; + } + if (p->layout_len > sizeof(p->layout)) { + printk(KERN_ERR PFX "Unexpected memory-layout size %d\n", + p->layout_len); + goto out_iounmap; + } + memcpy(&p->layout, ml, p->layout_len); + + jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs); + + mc_list_add(&p->list); + + printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %pOF\n", + op->dev.of_node); + + dev_set_drvdata(&op->dev, p); + + err = 0; + +out: + return err; + +out_iounmap: + of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); + +out_free: + kfree(p); + goto out; +} + +/* Does BANK decode PHYS_ADDR? */ +static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) +{ + unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; + unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; + + /* Bank must be enabled to match. */ + if (bp->valid == 0) + return 0; + + /* Would BANK match upper bits? */ + upper_bits ^= bp->um; /* What bits are different? */ + upper_bits = ~upper_bits; /* Invert. */ + upper_bits |= bp->uk; /* What bits don't matter for matching? */ + upper_bits = ~upper_bits; /* Invert. */ + + if (upper_bits) + return 0; + + /* Would BANK match lower bits? */ + lower_bits ^= bp->lm; /* What bits are different? */ + lower_bits = ~lower_bits; /* Invert. */ + lower_bits |= bp->lk; /* What bits don't matter for matching? */ + lower_bits = ~lower_bits; /* Invert. */ + + if (lower_bits) + return 0; + + /* I always knew you'd be the one. */ + return 1; +} + +/* Given PHYS_ADDR, search memory controller banks for a match. */ +static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) +{ + struct chmc *p; + + list_for_each_entry(p, &mctrl_list, list) { + int bank_no; + + for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { + struct chmc_bank_info *bp; + + bp = &p->logical_banks[bank_no]; + if (chmc_bank_match(bp, phys_addr)) + return bp; + } + } + + return NULL; +} + +/* This is the main purpose of this driver. */ +static int chmc_print_dimm(int syndrome_code, + unsigned long phys_addr, + char *buf, int buflen) +{ + struct chmc_bank_info *bp; + struct chmc_obp_mem_layout *prop; + int bank_in_controller, first_dimm; + + bp = chmc_find_bank(phys_addr); + if (bp == NULL || + syndrome_code < SYNDROME_MIN || + syndrome_code > SYNDROME_MAX) { + buf[0] = '?'; + buf[1] = '?'; + buf[2] = '?'; + buf[3] = '\0'; + return 0; + } + + prop = &bp->p->layout_prop; + bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); + first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1)); + first_dimm *= CHMCTRL_NDIMMS; + + if (syndrome_code != SYNDROME_MIN) { + char *dimm_str; + int pin; + + get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, + &dimm_str, prop, first_dimm); + sprintf(buf, "%s, pin %3d", dimm_str, pin); + } else { + int dimm; + + /* Multi-bit error, we just dump out all the + * dimm labels associated with this bank. + */ + for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) { + sprintf(buf, "%s ", + prop->dimm_labels[first_dimm + dimm]); + buf += strlen(buf); + } + } + return 0; +} + +/* Accessing the registers is slightly complicated. 
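+ * There is one controller per processor, reachable either through a
+ * cpu-local ASI or through its global physical mapping.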
If you want + * to get at the memory controller that is on the same processor + * the code is executing on, you must use the special ASI load/store; + * otherwise you go through the global mapping. + */ +static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset) +{ + unsigned long ret, this_cpu; + + preempt_disable(); + + this_cpu = real_hard_smp_processor_id(); + + if (p->portid == this_cpu) { + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=r" (ret) + : "r" (offset), "i" (ASI_MCU_CTRL_REG)); + } else { + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=r" (ret) + : "r" (p->regs + offset), + "i" (ASI_PHYS_BYPASS_EC_E)); + } + + preempt_enable(); + + return ret; +} + +#if 0 /* currently unused */ +static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val) +{ + if (p->portid == smp_processor_id()) { + __asm__ __volatile__("stxa %0, [%1] %2" + : : "r" (val), + "r" (offset), "i" (ASI_MCU_CTRL_REG)); + } else { + __asm__ __volatile__("stxa %0, [%1] %2" + : : "r" (val), + "r" (p->regs + offset), + "i" (ASI_PHYS_BYPASS_EC_E)); + } +} +#endif + +static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) +{ + struct chmc_bank_info *bp = &p->logical_banks[which_bank]; + + bp->p = p; + bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; + bp->raw_reg = val; + bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; + bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; + bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; + bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; + bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; + + bp->base = (bp->um); + bp->base &= ~(bp->uk); + bp->base <<= PA_UPPER_BITS_SHIFT; + + switch(bp->lk) { + case 0xf: + default: + bp->interleave = 1; + break; + + case 0xe: + bp->interleave = 2; + break; + + case 0xc: + bp->interleave = 4; + break; + + case 0x8: + bp->interleave = 8; + break; + + case 0x0: + bp->interleave = 16; + break; + } + + /* UK[10] is reserved, and UK[11] is not set for the SDRAM + * bank size definition.
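+ *
+ * As a worked example of the size computation below: uk == 0xf with an
+ * lk of 0xf (interleave factor 1) decodes to a bank of
+ * ((0xf & 0x3ff) + 1) << PA_UPPER_BITS_SHIFT == 16 << PA_UPPER_BITS_SHIFT
+ * bytes.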
+ */ + bp->size = (((unsigned long)bp->uk & + ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; + bp->size /= bp->interleave; +} + +static void chmc_fetch_decode_regs(struct chmc *p) +{ + if (p->layout_size == 0) + return; + + chmc_interpret_one_decode_reg(p, 0, + chmc_read_mcreg(p, CHMCTRL_DECODE1)); + chmc_interpret_one_decode_reg(p, 1, + chmc_read_mcreg(p, CHMCTRL_DECODE2)); + chmc_interpret_one_decode_reg(p, 2, + chmc_read_mcreg(p, CHMCTRL_DECODE3)); + chmc_interpret_one_decode_reg(p, 3, + chmc_read_mcreg(p, CHMCTRL_DECODE4)); +} + +static int chmc_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + unsigned long ver; + const void *pval; + int len, portid; + struct chmc *p; + int err; + + err = -ENODEV; + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID) + goto out; + + portid = of_getintprop_default(dp, "portid", -1); + if (portid == -1) + goto out; + + pval = of_get_property(dp, "memory-layout", &len); + if (pval && len > sizeof(p->layout_prop)) { + printk(KERN_ERR PFX "Unexpected memory-layout property " + "size %d.\n", len); + goto out; + } + + err = -ENOMEM; + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + printk(KERN_ERR PFX "Could not allocate struct chmc.\n"); + goto out; + } + + p->portid = portid; + p->layout_size = len; + if (!pval) + p->layout_size = 0; + else + memcpy(&p->layout_prop, pval, len); + + p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc"); + if (!p->regs) { + printk(KERN_ERR PFX "Could not map registers.\n"); + goto out_free; + } + + if (p->layout_size != 0UL) { + p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1); + p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2); + p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3); + p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4); + p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL); + } + + chmc_fetch_decode_regs(p); + + mc_list_add(&p->list); + + printk(KERN_INFO PFX "UltraSPARC-III memory controller at %pOF [%s]\n", + dp, + (p->layout_size ? 
"ACTIVE" : "INACTIVE")); + + dev_set_drvdata(&op->dev, p); + + err = 0; + +out: + return err; + +out_free: + kfree(p); + goto out; +} + +static int us3mc_probe(struct platform_device *op) +{ + if (mc_type == MC_TYPE_SAFARI) + return chmc_probe(op); + else if (mc_type == MC_TYPE_JBUS) + return jbusmc_probe(op); + return -ENODEV; +} + +static void chmc_destroy(struct platform_device *op, struct chmc *p) +{ + list_del(&p->list); + of_iounmap(&op->resource[0], p->regs, 0x48); + kfree(p); +} + +static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p) +{ + mc_list_del(&p->list); + of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); + kfree(p); +} + +static int us3mc_remove(struct platform_device *op) +{ + void *p = dev_get_drvdata(&op->dev); + + if (p) { + if (mc_type == MC_TYPE_SAFARI) + chmc_destroy(op, p); + else if (mc_type == MC_TYPE_JBUS) + jbusmc_destroy(op, p); + } + return 0; +} + +static const struct of_device_id us3mc_match[] = { + { + .name = "memory-controller", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, us3mc_match); + +static struct platform_driver us3mc_driver = { + .driver = { + .name = "us3mc", + .of_match_table = us3mc_match, + }, + .probe = us3mc_probe, + .remove = us3mc_remove, +}; + +static inline bool us3mc_platform(void) +{ + if (tlb_type == cheetah || tlb_type == cheetah_plus) + return true; + return false; +} + +static int __init us3mc_init(void) +{ + unsigned long ver; + int ret; + + if (!us3mc_platform()) + return -ENODEV; + + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID) { + mc_type = MC_TYPE_JBUS; + us3mc_dimm_printer = jbusmc_print_dimm; + } else { + mc_type = MC_TYPE_SAFARI; + us3mc_dimm_printer = chmc_print_dimm; + } + + ret = register_dimm_printer(us3mc_dimm_printer); + + if (!ret) { + ret = platform_driver_register(&us3mc_driver); + if (ret) + unregister_dimm_printer(us3mc_dimm_printer); + } + return ret; +} + +static void __exit us3mc_cleanup(void) +{ + if (us3mc_platform()) { + unregister_dimm_printer(us3mc_dimm_printer); + platform_driver_unregister(&us3mc_driver); + } +} + +module_init(us3mc_init); +module_exit(us3mc_cleanup); diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c new file mode 100644 index 000000000..f1ea0005a --- /dev/null +++ b/arch/sparc/kernel/compat_audit.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#define __32bit_syscall_numbers__ +#include +#include +#include "kernel.h" + +unsigned int sparc32_dir_class[] = { +#include +~0U +}; + +unsigned int sparc32_chattr_class[] = { +#include +~0U +}; + +unsigned int sparc32_write_class[] = { +#include +~0U +}; + +unsigned int sparc32_read_class[] = { +#include +~0U +}; + +unsigned int sparc32_signal_class[] = { +#include +~0U +}; + +int sparc32_classify_syscall(unsigned int syscall) +{ + switch(syscall) { + case __NR_open: + return AUDITSC_OPEN; + case __NR_openat: + return AUDITSC_OPENAT; + case __NR_socketcall: + return AUDITSC_SOCKETCALL; + case __NR_execve: + return AUDITSC_EXECVE; + case __NR_openat2: + return AUDITSC_OPENAT2; + default: + return AUDITSC_COMPAT; + } +} diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c new file mode 100644 index 000000000..79cd6ccfe --- /dev/null +++ b/arch/sparc/kernel/cpu.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: GPL-2.0 +/* cpu.c: Dinky routines to look for the kind of Sparc cpu + * we are on. + * + * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu)
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "kernel.h"
+#include "entry.h"
+
+DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
+EXPORT_PER_CPU_SYMBOL(__cpu_data);
+
+int ncpus_probed;
+unsigned int fsr_storage;
+
+struct cpu_info {
+	int psr_vers;
+	const char *name;
+	const char *pmu_name;
+};
+
+struct fpu_info {
+	int fp_vers;
+	const char *name;
+};
+
+#define NOCPU 8
+#define NOFPU 8
+
+struct manufacturer_info {
+	int psr_impl;
+	struct cpu_info cpu_info[NOCPU];
+	struct fpu_info fpu_info[NOFPU];
+};
+
+#define CPU(ver, _name) \
+{ .psr_vers = ver, .name = _name }
+
+#define CPU_PMU(ver, _name, _pmu_name) \
+{ .psr_vers = ver, .name = _name, .pmu_name = _pmu_name }
+
+#define FPU(ver, _name) \
+{ .fp_vers = ver, .name = _name }
+
+static const struct manufacturer_info __initconst manufacturer_info[] = {
+{
+	0,
+	/* Sun4/100, 4/200, SLC */
+	.cpu_info = {
+		CPU(0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"),
+		/* born as STP1012PGA */
+		CPU(4, "Fujitsu MB86904"),
+		CPU(5, "Fujitsu TurboSparc MB86907"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(0, "Fujitsu MB86910 or Weitek WTL1164/5"),
+		FPU(1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"),
+		FPU(2, "LSI Logic L64802 or Texas Instruments ACT8847"),
+		/* SparcStation SLC, SparcStation1 */
+		FPU(3, "Weitek WTL3170/2"),
+		/* SPARCstation-5 */
+		FPU(4, "LSI Logic/Meiko L64804 or compatible"),
+		FPU(-1, NULL)
+	}
+},{
+	1,
+	.cpu_info = {
+		/* SparcStation2, SparcServer 490 & 690 */
+		CPU(0, "LSI Logic Corporation - L64811"),
+		/* SparcStation2 */
+		CPU(1, "Cypress/ROSS CY7C601"),
+		/* Embedded controller */
+		CPU(3, "Cypress/ROSS CY7C611"),
+		/* Ross Technologies HyperSparc */
+		CPU(0xf, "ROSS HyperSparc RT620"),
+		CPU(0xe, "ROSS HyperSparc RT625 or RT626"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(0, "ROSS HyperSparc combined IU/FPU"),
+		FPU(1, "LSI Logic L64814"),
+		FPU(2, "Texas Instruments TMS390-C602A"),
+		FPU(3, "Cypress CY7C602 FPU"),
+		FPU(-1, NULL)
+	}
+},{
+	2,
+	.cpu_info = {
+		/* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
+		/* Someone please write the code to support this beast! ;) */
+		CPU(0, "Bipolar Integrated Technology - B5010"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(-1, NULL)
+	}
+},{
+	3,
+	.cpu_info = {
+		CPU(0, "LSI Logic Corporation - unknown-type"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(-1, NULL)
+	}
+},{
+	PSR_IMPL_TI,
+	.cpu_info = {
+		CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
+		/* SparcClassic -- born as STP1010TAB-50 */
+		CPU(1, "Texas Instruments, Inc. - MicroSparc"),
+		CPU(2, "Texas Instruments, Inc. - MicroSparc II"),
+		CPU(3, "Texas Instruments, Inc. - SuperSparc 51"),
+		CPU(4, "Texas Instruments, Inc. - SuperSparc 61"),
+		CPU(5, "Texas Instruments, Inc. - unknown"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		/* SuperSparc 50 module */
+		FPU(0, "SuperSparc on-chip FPU"),
+		/* SparcClassic */
+		FPU(4, "TI MicroSparc on chip FPU"),
+		FPU(-1, NULL)
+	}
+},{
+	5,
+	.cpu_info = {
+		CPU(0, "Matsushita - MN10501"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(0, "Matsushita MN10501"),
+		FPU(-1, NULL)
+	}
+},{
+	6,
+	.cpu_info = {
+		CPU(0, "Philips Corporation - unknown"),
+		CPU(-1, NULL)
+	},
+	.fpu_info = {
+		FPU(-1, NULL)
+	}
+},{
+	7,
+	.cpu_info = {
+		CPU(0, "Harvest VLSI Design Center, Inc.
- unknown"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(-1, NULL) + } +},{ + 8, + .cpu_info = { + CPU(0, "Systems and Processes Engineering Corporation (SPEC)"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(-1, NULL) + } +},{ + 9, + .cpu_info = { + /* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */ + CPU(0, "Fujitsu or Weitek Power-UP"), + CPU(1, "Fujitsu or Weitek Power-UP"), + CPU(2, "Fujitsu or Weitek Power-UP"), + CPU(3, "Fujitsu or Weitek Power-UP"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(3, "Fujitsu or Weitek on-chip FPU"), + FPU(-1, NULL) + } +},{ + PSR_IMPL_LEON, /* Aeroflex Gaisler */ + .cpu_info = { + CPU(3, "LEON"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(2, "GRFPU"), + FPU(3, "GRFPU-Lite"), + FPU(-1, NULL) + } +},{ + 0x17, + .cpu_info = { + CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"), + CPU_PMU(0x11, "TI UltraSparc II (BlackBird)", "ultra12"), + CPU_PMU(0x12, "TI UltraSparc IIi (Sabre)", "ultra12"), + CPU_PMU(0x13, "TI UltraSparc IIe (Hummingbird)", "ultra12"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(0x10, "UltraSparc I integrated FPU"), + FPU(0x11, "UltraSparc II integrated FPU"), + FPU(0x12, "UltraSparc IIi integrated FPU"), + FPU(0x13, "UltraSparc IIe integrated FPU"), + FPU(-1, NULL) + } +},{ + 0x22, + .cpu_info = { + CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(0x10, "UltraSparc I integrated FPU"), + FPU(-1, NULL) + } +},{ + 0x3e, + .cpu_info = { + CPU_PMU(0x14, "TI UltraSparc III (Cheetah)", "ultra3"), + CPU_PMU(0x15, "TI UltraSparc III+ (Cheetah+)", "ultra3+"), + CPU_PMU(0x16, "TI UltraSparc IIIi (Jalapeno)", "ultra3i"), + CPU_PMU(0x18, "TI UltraSparc IV (Jaguar)", "ultra3+"), + CPU_PMU(0x19, "TI UltraSparc IV+ (Panther)", "ultra4+"), + CPU_PMU(0x22, "TI UltraSparc IIIi+ (Serrano)", "ultra3i"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(0x14, "UltraSparc III integrated FPU"), + FPU(0x15, "UltraSparc III+ integrated FPU"), + FPU(0x16, "UltraSparc IIIi integrated FPU"), + FPU(0x18, "UltraSparc IV integrated FPU"), + FPU(0x19, "UltraSparc IV+ integrated FPU"), + FPU(0x22, "UltraSparc IIIi+ integrated FPU"), + FPU(-1, NULL) + } +}}; + +/* In order to get the fpu type correct, you need to take the IDPROM's + * machine type value into consideration too. I will fix this. 
+ */ + +static const char *sparc_cpu_type; +static const char *sparc_fpu_type; +const char *sparc_pmu_type; + + +static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers) +{ + const struct manufacturer_info *manuf; + int i; + + sparc_cpu_type = NULL; + sparc_fpu_type = NULL; + sparc_pmu_type = NULL; + manuf = NULL; + + for (i = 0; i < ARRAY_SIZE(manufacturer_info); i++) + { + if (psr_impl == manufacturer_info[i].psr_impl) { + manuf = &manufacturer_info[i]; + break; + } + } + if (manuf != NULL) + { + const struct cpu_info *cpu; + const struct fpu_info *fpu; + + cpu = &manuf->cpu_info[0]; + while (cpu->psr_vers != -1) + { + if (cpu->psr_vers == psr_vers) { + sparc_cpu_type = cpu->name; + sparc_pmu_type = cpu->pmu_name; + sparc_fpu_type = "No FPU"; + break; + } + cpu++; + } + fpu = &manuf->fpu_info[0]; + while (fpu->fp_vers != -1) + { + if (fpu->fp_vers == fpu_vers) { + sparc_fpu_type = fpu->name; + break; + } + fpu++; + } + } + if (sparc_cpu_type == NULL) + { + printk(KERN_ERR "CPU: Unknown chip, impl[0x%x] vers[0x%x]\n", + psr_impl, psr_vers); + sparc_cpu_type = "Unknown CPU"; + } + if (sparc_fpu_type == NULL) + { + printk(KERN_ERR "FPU: Unknown chip, impl[0x%x] vers[0x%x]\n", + psr_impl, fpu_vers); + sparc_fpu_type = "Unknown FPU"; + } + if (sparc_pmu_type == NULL) + sparc_pmu_type = "Unknown PMU"; +} + +#ifdef CONFIG_SPARC32 +static int show_cpuinfo(struct seq_file *m, void *__unused) +{ + seq_printf(m, + "cpu\t\t: %s\n" + "fpu\t\t: %s\n" + "promlib\t\t: Version %d Revision %d\n" + "prom\t\t: %d.%d\n" + "type\t\t: %s\n" + "ncpus probed\t: %d\n" + "ncpus active\t: %d\n" +#ifndef CONFIG_SMP + "CPU0Bogo\t: %lu.%02lu\n" + "CPU0ClkTck\t: %ld\n" +#endif + , + sparc_cpu_type, + sparc_fpu_type , + romvec->pv_romvers, + prom_rev, + romvec->pv_printrev >> 16, + romvec->pv_printrev & 0xffff, + &cputypval[0], + ncpus_probed, + num_online_cpus() +#ifndef CONFIG_SMP + , cpu_data(0).udelay_val/(500000/HZ), + (cpu_data(0).udelay_val/(5000/HZ)) % 100, + cpu_data(0).clock_tick +#endif + ); + +#ifdef CONFIG_SMP + smp_bogo(m); +#endif + mmu_info(m); +#ifdef CONFIG_SMP + smp_info(m); +#endif + return 0; +} +#endif /* CONFIG_SPARC32 */ + +#ifdef CONFIG_SPARC64 +unsigned int dcache_parity_tl1_occurred; +unsigned int icache_parity_tl1_occurred; + + +static int show_cpuinfo(struct seq_file *m, void *__unused) +{ + seq_printf(m, + "cpu\t\t: %s\n" + "fpu\t\t: %s\n" + "pmu\t\t: %s\n" + "prom\t\t: %s\n" + "type\t\t: %s\n" + "ncpus probed\t: %d\n" + "ncpus active\t: %d\n" + "D$ parity tl1\t: %u\n" + "I$ parity tl1\t: %u\n" +#ifndef CONFIG_SMP + "Cpu0ClkTck\t: %016lx\n" +#endif + , + sparc_cpu_type, + sparc_fpu_type, + sparc_pmu_type, + prom_version, + ((tlb_type == hypervisor) ? + "sun4v" : + "sun4u"), + ncpus_probed, + num_online_cpus(), + dcache_parity_tl1_occurred, + icache_parity_tl1_occurred +#ifndef CONFIG_SMP + , cpu_data(0).clock_tick +#endif + ); + cpucap_info(m); +#ifdef CONFIG_SMP + smp_bogo(m); +#endif + mmu_info(m); +#ifdef CONFIG_SMP + smp_info(m); +#endif + return 0; +} +#endif /* CONFIG_SPARC64 */ + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + /* The pointer we are returning is arbitrary, + * it just has to be non-NULL and not IS_ERR + * in the success case. + */ + return *pos == 0 ? 
&c_start : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start =c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +#ifdef CONFIG_SPARC32 +static int __init cpu_type_probe(void) +{ + int psr_impl, psr_vers, fpu_vers; + int psr; + + psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK); + psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK); + + psr = get_psr(); + put_psr(psr | PSR_EF); + + if (psr_impl == PSR_IMPL_LEON) + fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7; + else + fpu_vers = ((get_fsr() >> 17) & 0x7); + + put_psr(psr); + + set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); + + return 0; +} +#endif /* CONFIG_SPARC32 */ + +#ifdef CONFIG_SPARC64 +static void __init sun4v_cpu_probe(void) +{ + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + sparc_cpu_type = "UltraSparc T1 (Niagara)"; + sparc_fpu_type = "UltraSparc T1 integrated FPU"; + sparc_pmu_type = "niagara"; + break; + + case SUN4V_CHIP_NIAGARA2: + sparc_cpu_type = "UltraSparc T2 (Niagara2)"; + sparc_fpu_type = "UltraSparc T2 integrated FPU"; + sparc_pmu_type = "niagara2"; + break; + + case SUN4V_CHIP_NIAGARA3: + sparc_cpu_type = "UltraSparc T3 (Niagara3)"; + sparc_fpu_type = "UltraSparc T3 integrated FPU"; + sparc_pmu_type = "niagara3"; + break; + + case SUN4V_CHIP_NIAGARA4: + sparc_cpu_type = "UltraSparc T4 (Niagara4)"; + sparc_fpu_type = "UltraSparc T4 integrated FPU"; + sparc_pmu_type = "niagara4"; + break; + + case SUN4V_CHIP_NIAGARA5: + sparc_cpu_type = "UltraSparc T5 (Niagara5)"; + sparc_fpu_type = "UltraSparc T5 integrated FPU"; + sparc_pmu_type = "niagara5"; + break; + + case SUN4V_CHIP_SPARC_M6: + sparc_cpu_type = "SPARC-M6"; + sparc_fpu_type = "SPARC-M6 integrated FPU"; + sparc_pmu_type = "sparc-m6"; + break; + + case SUN4V_CHIP_SPARC_M7: + sparc_cpu_type = "SPARC-M7"; + sparc_fpu_type = "SPARC-M7 integrated FPU"; + sparc_pmu_type = "sparc-m7"; + break; + + case SUN4V_CHIP_SPARC_M8: + sparc_cpu_type = "SPARC-M8"; + sparc_fpu_type = "SPARC-M8 integrated FPU"; + sparc_pmu_type = "sparc-m8"; + break; + + case SUN4V_CHIP_SPARC_SN: + sparc_cpu_type = "SPARC-SN"; + sparc_fpu_type = "SPARC-SN integrated FPU"; + sparc_pmu_type = "sparc-sn"; + break; + + case SUN4V_CHIP_SPARC64X: + sparc_cpu_type = "SPARC64-X"; + sparc_fpu_type = "SPARC64-X integrated FPU"; + sparc_pmu_type = "sparc64-x"; + break; + + default: + printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", + prom_cpu_compatible); + sparc_cpu_type = "Unknown SUN4V CPU"; + sparc_fpu_type = "Unknown SUN4V FPU"; + sparc_pmu_type = "Unknown SUN4V PMU"; + break; + } +} + +static int __init cpu_type_probe(void) +{ + if (tlb_type == hypervisor) { + sun4v_cpu_probe(); + } else { + unsigned long ver; + int manuf, impl; + + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); + + manuf = ((ver >> 48) & 0xffff); + impl = ((ver >> 32) & 0xffff); + set_cpu_and_fpu(manuf, impl, impl); + } + return 0; +} +#endif /* CONFIG_SPARC64 */ + +early_initcall(cpu_type_probe); diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c new file mode 100644 index 000000000..f07ea88a8 --- /dev/null +++ b/arch/sparc/kernel/cpumap.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0 +/* cpumap.c: used for optimizing CPU assignment + * + * Copyright (C) 2009 Hong H. 
Pham + */ + +#include +#include +#include +#include +#include +#include +#include "cpumap.h" + + +enum { + CPUINFO_LVL_ROOT = 0, + CPUINFO_LVL_NODE, + CPUINFO_LVL_CORE, + CPUINFO_LVL_PROC, + CPUINFO_LVL_MAX, +}; + +enum { + ROVER_NO_OP = 0, + /* Increment rover every time level is visited */ + ROVER_INC_ON_VISIT = 1 << 0, + /* Increment parent's rover every time rover wraps around */ + ROVER_INC_PARENT_ON_LOOP = 1 << 1, +}; + +struct cpuinfo_node { + int id; + int level; + int num_cpus; /* Number of CPUs in this hierarchy */ + int parent_index; + int child_start; /* Array index of the first child node */ + int child_end; /* Array index of the last child node */ + int rover; /* Child node iterator */ +}; + +struct cpuinfo_level { + int start_index; /* Index of first node of a level in a cpuinfo tree */ + int end_index; /* Index of last node of a level in a cpuinfo tree */ + int num_nodes; /* Number of nodes in a level in a cpuinfo tree */ +}; + +struct cpuinfo_tree { + int total_nodes; + + /* Offsets into nodes[] for each level of the tree */ + struct cpuinfo_level level[CPUINFO_LVL_MAX]; + struct cpuinfo_node nodes[]; +}; + + +static struct cpuinfo_tree *cpuinfo_tree; + +static u16 cpu_distribution_map[NR_CPUS]; +static DEFINE_SPINLOCK(cpu_map_lock); + + +/* Niagara optimized cpuinfo tree traversal. */ +static const int niagara_iterate_method[] = { + [CPUINFO_LVL_ROOT] = ROVER_NO_OP, + + /* Strands (or virtual CPUs) within a core may not run concurrently + * on the Niagara, as instruction pipeline(s) are shared. Distribute + * work to strands in different cores first for better concurrency. + * Go to next NUMA node when all cores are used. + */ + [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, + + /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e. + * a proc_id represents an instruction pipeline. Distribute work to + * strands in different proc_id groups if the core has multiple + * instruction pipelines (e.g. the Niagara 2/2+ has two). + */ + [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT, + + /* Pick the next strand in the proc_id group. */ + [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT, +}; + +/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA + * nodes. + */ +static const int generic_iterate_method[] = { + [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT, + [CPUINFO_LVL_NODE] = ROVER_NO_OP, + [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP, + [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, +}; + + +static int cpuinfo_id(int cpu, int level) +{ + int id; + + switch (level) { + case CPUINFO_LVL_ROOT: + id = 0; + break; + case CPUINFO_LVL_NODE: + id = cpu_to_node(cpu); + break; + case CPUINFO_LVL_CORE: + id = cpu_data(cpu).core_id; + break; + case CPUINFO_LVL_PROC: + id = cpu_data(cpu).proc_id; + break; + default: + id = -EINVAL; + } + return id; +} + +/* + * Enumerate the CPU information in __cpu_data to determine the start index, + * end index, and number of nodes for each level in the cpuinfo tree. The + * total number of cpuinfo nodes required to build the tree is returned. 
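+ *
+ * For example, on a hypothetical machine (invented for this comment)
+ * with 2 NUMA nodes, 2 cores per node and 2 instruction pipelines
+ * (proc_id groups) per core, the enumeration yields
+ * 1 root + 2 node + 4 core + 8 proc = 15 cpuinfo nodes.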
+ */ +static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level) +{ + int prev_id[CPUINFO_LVL_MAX]; + int i, n, num_nodes; + + for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) { + struct cpuinfo_level *lv = &tree_level[i]; + + prev_id[i] = -1; + lv->start_index = lv->end_index = lv->num_nodes = 0; + } + + num_nodes = 1; /* Include the root node */ + + for (i = 0; i < num_possible_cpus(); i++) { + if (!cpu_online(i)) + continue; + + n = cpuinfo_id(i, CPUINFO_LVL_NODE); + if (n > prev_id[CPUINFO_LVL_NODE]) { + tree_level[CPUINFO_LVL_NODE].num_nodes++; + prev_id[CPUINFO_LVL_NODE] = n; + num_nodes++; + } + n = cpuinfo_id(i, CPUINFO_LVL_CORE); + if (n > prev_id[CPUINFO_LVL_CORE]) { + tree_level[CPUINFO_LVL_CORE].num_nodes++; + prev_id[CPUINFO_LVL_CORE] = n; + num_nodes++; + } + n = cpuinfo_id(i, CPUINFO_LVL_PROC); + if (n > prev_id[CPUINFO_LVL_PROC]) { + tree_level[CPUINFO_LVL_PROC].num_nodes++; + prev_id[CPUINFO_LVL_PROC] = n; + num_nodes++; + } + } + + tree_level[CPUINFO_LVL_ROOT].num_nodes = 1; + + n = tree_level[CPUINFO_LVL_NODE].num_nodes; + tree_level[CPUINFO_LVL_NODE].start_index = 1; + tree_level[CPUINFO_LVL_NODE].end_index = n; + + n++; + tree_level[CPUINFO_LVL_CORE].start_index = n; + n += tree_level[CPUINFO_LVL_CORE].num_nodes; + tree_level[CPUINFO_LVL_CORE].end_index = n - 1; + + tree_level[CPUINFO_LVL_PROC].start_index = n; + n += tree_level[CPUINFO_LVL_PROC].num_nodes; + tree_level[CPUINFO_LVL_PROC].end_index = n - 1; + + return num_nodes; +} + +/* Build a tree representation of the CPU hierarchy using the per CPU + * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are + * assumed to be sorted in ascending order based on node, core_id, and + * proc_id (in order of significance). + */ +static struct cpuinfo_tree *build_cpuinfo_tree(void) +{ + struct cpuinfo_tree *new_tree; + struct cpuinfo_node *node; + struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX]; + int num_cpus[CPUINFO_LVL_MAX]; + int level_rover[CPUINFO_LVL_MAX]; + int prev_id[CPUINFO_LVL_MAX]; + int n, id, cpu, prev_cpu, last_cpu, level; + + n = enumerate_cpuinfo_nodes(tmp_level); + + new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC); + if (!new_tree) + return NULL; + + new_tree->total_nodes = n; + memcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); + + prev_cpu = cpu = cpumask_first(cpu_online_mask); + + /* Initialize all levels in the tree with the first CPU */ + for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { + n = new_tree->level[level].start_index; + + level_rover[level] = n; + node = &new_tree->nodes[n]; + + id = cpuinfo_id(cpu, level); + if (unlikely(id < 0)) { + kfree(new_tree); + return NULL; + } + node->id = id; + node->level = level; + node->num_cpus = 1; + + node->parent_index = (level > CPUINFO_LVL_ROOT) + ? new_tree->level[level - 1].start_index : -1; + + node->child_start = node->child_end = node->rover = + (level == CPUINFO_LVL_PROC) + ? 
cpu : new_tree->level[level + 1].start_index; + + prev_id[level] = node->id; + num_cpus[level] = 1; + } + + for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) { + if (cpu_online(last_cpu)) + break; + } + + while (++cpu <= last_cpu) { + if (!cpu_online(cpu)) + continue; + + for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; + level--) { + id = cpuinfo_id(cpu, level); + if (unlikely(id < 0)) { + kfree(new_tree); + return NULL; + } + + if ((id != prev_id[level]) || (cpu == last_cpu)) { + prev_id[level] = id; + node = &new_tree->nodes[level_rover[level]]; + node->num_cpus = num_cpus[level]; + num_cpus[level] = 1; + + if (cpu == last_cpu) + node->num_cpus++; + + /* Connect tree node to parent */ + if (level == CPUINFO_LVL_ROOT) + node->parent_index = -1; + else + node->parent_index = + level_rover[level - 1]; + + if (level == CPUINFO_LVL_PROC) { + node->child_end = + (cpu == last_cpu) ? cpu : prev_cpu; + } else { + node->child_end = + level_rover[level + 1] - 1; + } + + /* Initialize the next node in the same level */ + n = ++level_rover[level]; + if (n <= new_tree->level[level].end_index) { + node = &new_tree->nodes[n]; + node->id = id; + node->level = level; + + /* Connect node to child */ + node->child_start = node->child_end = + node->rover = + (level == CPUINFO_LVL_PROC) + ? cpu : level_rover[level + 1]; + } + } else + num_cpus[level]++; + } + prev_cpu = cpu; + } + + return new_tree; +} + +static void increment_rover(struct cpuinfo_tree *t, int node_index, + int root_index, const int *rover_inc_table) +{ + struct cpuinfo_node *node = &t->nodes[node_index]; + int top_level, level; + + top_level = t->nodes[root_index].level; + for (level = node->level; level >= top_level; level--) { + node->rover++; + if (node->rover <= node->child_end) + return; + + node->rover = node->child_start; + /* If parent's rover does not need to be adjusted, stop here. */ + if ((level == top_level) || + !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP)) + return; + + node = &t->nodes[node->parent_index]; + } +} + +static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) +{ + const int *rover_inc_table; + int level, new_index, index = root_index; + + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + case SUN4V_CHIP_NIAGARA2: + case SUN4V_CHIP_NIAGARA3: + case SUN4V_CHIP_NIAGARA4: + case SUN4V_CHIP_NIAGARA5: + case SUN4V_CHIP_SPARC_M6: + case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: + case SUN4V_CHIP_SPARC_SN: + case SUN4V_CHIP_SPARC64X: + rover_inc_table = niagara_iterate_method; + break; + default: + rover_inc_table = generic_iterate_method; + } + + for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX; + level++) { + new_index = t->nodes[index].rover; + if (rover_inc_table[level] & ROVER_INC_ON_VISIT) + increment_rover(t, index, root_index, rover_inc_table); + + index = new_index; + } + return index; +} + +static void _cpu_map_rebuild(void) +{ + int i; + + if (cpuinfo_tree) { + kfree(cpuinfo_tree); + cpuinfo_tree = NULL; + } + + cpuinfo_tree = build_cpuinfo_tree(); + if (!cpuinfo_tree) + return; + + /* Build CPU distribution map that spans all online CPUs. No need + * to check if the CPU is online, as that is done when the cpuinfo + * tree is being built. + */ + for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++) + cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0); +} + +/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear + * round robin. 
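+ *
+ * For example (all CPUs 0-3 online), index 6 maps to 6 % 4 = CPU 2;
+ * successive indices simply walk the online CPUs in ascending id order.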
+ */ +static int simple_map_to_cpu(unsigned int index) +{ + int i, end, cpu_rover; + + cpu_rover = 0; + end = index % num_online_cpus(); + for (i = 0; i < num_possible_cpus(); i++) { + if (cpu_online(cpu_rover)) { + if (cpu_rover >= end) + return cpu_rover; + + cpu_rover++; + } + } + + /* Impossible, since num_online_cpus() <= num_possible_cpus() */ + return cpumask_first(cpu_online_mask); +} + +static int _map_to_cpu(unsigned int index) +{ + struct cpuinfo_node *root_node; + + if (unlikely(!cpuinfo_tree)) { + _cpu_map_rebuild(); + if (!cpuinfo_tree) + return simple_map_to_cpu(index); + } + + root_node = &cpuinfo_tree->nodes[0]; +#ifdef CONFIG_HOTPLUG_CPU + if (unlikely(root_node->num_cpus != num_online_cpus())) { + _cpu_map_rebuild(); + if (!cpuinfo_tree) + return simple_map_to_cpu(index); + } +#endif + return cpu_distribution_map[index % root_node->num_cpus]; +} + +int map_to_cpu(unsigned int index) +{ + int mapped_cpu; + unsigned long flag; + + spin_lock_irqsave(&cpu_map_lock, flag); + mapped_cpu = _map_to_cpu(index); + +#ifdef CONFIG_HOTPLUG_CPU + while (unlikely(!cpu_online(mapped_cpu))) + mapped_cpu = _map_to_cpu(index); +#endif + spin_unlock_irqrestore(&cpu_map_lock, flag); + return mapped_cpu; +} +EXPORT_SYMBOL(map_to_cpu); + +void cpu_map_rebuild(void) +{ + unsigned long flag; + + spin_lock_irqsave(&cpu_map_lock, flag); + _cpu_map_rebuild(); + spin_unlock_irqrestore(&cpu_map_lock, flag); +} diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h new file mode 100644 index 000000000..7d5b77486 --- /dev/null +++ b/arch/sparc/kernel/cpumap.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CPUMAP_H +#define _CPUMAP_H + +#ifdef CONFIG_SMP +void cpu_map_rebuild(void); +int map_to_cpu(unsigned int index); +#define cpu_map_init() cpu_map_rebuild() +#else +#define cpu_map_init() do {} while (0) +static inline int map_to_cpu(unsigned int index) +{ + return raw_smp_processor_id(); +} +#endif + +#endif diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c new file mode 100644 index 000000000..23b6e50d4 --- /dev/null +++ b/arch/sparc/kernel/devices.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* devices.c: Initial scan of the prom device tree for important + * Sparc device nodes which we need to find. + * + * This is based on the sparc64 version, but sun4m doesn't always use + * the hardware MIDs, so be careful. + * + * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +static char *cpu_mid_prop(void) +{ + if (sparc_cpu_model == sun4d) + return "cpu-id"; + return "mid"; +} + +static int check_cpu_node(phandle nd, int *cur_inst, + int (*compare)(phandle, int, void *), void *compare_arg, + phandle *prom_node, int *mid) +{ + if (!compare(nd, *cur_inst, compare_arg)) { + if (prom_node) + *prom_node = nd; + if (mid) { + *mid = prom_getintdefault(nd, cpu_mid_prop(), 0); + if (sparc_cpu_model == sun4m) + *mid &= 3; + } + return 0; + } + + (*cur_inst)++; + + return -ENODEV; +} + +static int __cpu_find_by(int (*compare)(phandle, int, void *), + void *compare_arg, phandle *prom_node, int *mid) +{ + struct device_node *dp; + int cur_inst; + + cur_inst = 0; + for_each_node_by_type(dp, "cpu") { + int err = check_cpu_node(dp->phandle, &cur_inst, + compare, compare_arg, + prom_node, mid); + if (!err) { + of_node_put(dp); + return 0; + } + } + + return -ENODEV; +} + +static int cpu_instance_compare(phandle nd, int instance, void *_arg) +{ + int desired_instance = (int) _arg; + + if (instance == desired_instance) + return 0; + return -ENODEV; +} + +int cpu_find_by_instance(int instance, phandle *prom_node, int *mid) +{ + return __cpu_find_by(cpu_instance_compare, (void *)instance, + prom_node, mid); +} + +static int cpu_mid_compare(phandle nd, int instance, void *_arg) +{ + int desired_mid = (int) _arg; + int this_mid; + + this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0); + if (this_mid == desired_mid + || (sparc_cpu_model == sun4m && (this_mid & 3) == desired_mid)) + return 0; + return -ENODEV; +} + +int cpu_find_by_mid(int mid, phandle *prom_node) +{ + return __cpu_find_by(cpu_mid_compare, (void *)mid, + prom_node, NULL); +} + +/* sun4m uses truncated mids since we base the cpuid on the ttable/irqset + * address (0-3). This gives us the true hardware mid, which might have + * some other bits set. On 4d hardware and software mids are the same. + */ +int cpu_get_hwmid(phandle prom_node) +{ + return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV); +} + +void __init device_scan(void) +{ + printk(KERN_NOTICE "Booting Linux...\n"); + +#ifndef CONFIG_SMP + { + phandle cpu_node; + int err; + err = cpu_find_by_instance(0, &cpu_node, NULL); + if (err) { + /* Probably a sun4e, Sun is trying to trick us ;-) */ + prom_printf("No cpu nodes, cannot continue\n"); + prom_halt(); + } + cpu_data(0).clock_tick = prom_getintdefault(cpu_node, + "clock-frequency", + 0); + } +#endif /* !CONFIG_SMP */ + + auxio_probe(); + auxio_power_probe(); +} diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c new file mode 100644 index 000000000..4a5bdb0df --- /dev/null +++ b/arch/sparc/kernel/ds.c @@ -0,0 +1,1269 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* ds.c: Domain Services driver for Logical Domains + * + * Copyright (C) 2007, 2008 David S. Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +#define DRV_MODULE_NAME "ds" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "Jul 11, 2007" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM domain services driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +struct ds_msg_tag { + __u32 type; +#define DS_INIT_REQ 0x00 +#define DS_INIT_ACK 0x01 +#define DS_INIT_NACK 0x02 +#define DS_REG_REQ 0x03 +#define DS_REG_ACK 0x04 +#define DS_REG_NACK 0x05 +#define DS_UNREG_REQ 0x06 +#define DS_UNREG_ACK 0x07 +#define DS_UNREG_NACK 0x08 +#define DS_DATA 0x09 +#define DS_NACK 0x0a + + __u32 len; +}; + +/* Result codes */ +#define DS_OK 0x00 +#define DS_REG_VER_NACK 0x01 +#define DS_REG_DUP 0x02 +#define DS_INV_HDL 0x03 +#define DS_TYPE_UNKNOWN 0x04 + +struct ds_version { + __u16 major; + __u16 minor; +}; + +struct ds_ver_req { + struct ds_msg_tag tag; + struct ds_version ver; +}; + +struct ds_ver_ack { + struct ds_msg_tag tag; + __u16 minor; +}; + +struct ds_ver_nack { + struct ds_msg_tag tag; + __u16 major; +}; + +struct ds_reg_req { + struct ds_msg_tag tag; + __u64 handle; + __u16 major; + __u16 minor; + char svc_id[]; +}; + +struct ds_reg_ack { + struct ds_msg_tag tag; + __u64 handle; + __u16 minor; +}; + +struct ds_reg_nack { + struct ds_msg_tag tag; + __u64 handle; + __u16 major; +}; + +struct ds_unreg_req { + struct ds_msg_tag tag; + __u64 handle; +}; + +struct ds_unreg_ack { + struct ds_msg_tag tag; + __u64 handle; +}; + +struct ds_unreg_nack { + struct ds_msg_tag tag; + __u64 handle; +}; + +struct ds_data { + struct ds_msg_tag tag; + __u64 handle; +}; + +struct ds_data_nack { + struct ds_msg_tag tag; + __u64 handle; + __u64 result; +}; + +struct ds_info; +struct ds_cap_state { + __u64 handle; + + void (*data)(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); + + const char *service_id; + + u8 state; +#define CAP_STATE_UNKNOWN 0x00 +#define CAP_STATE_REG_SENT 0x01 +#define CAP_STATE_REGISTERED 0x02 +}; + +static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, + void *buf, int len); +static void domain_shutdown_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); +static void domain_panic_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); +#ifdef CONFIG_HOTPLUG_CPU +static void dr_cpu_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); +#endif +static void ds_pri_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); +static void ds_var_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len); + +static struct ds_cap_state ds_states_template[] = { + { + .service_id = "md-update", + .data = md_update_data, + }, + { + .service_id = "domain-shutdown", + .data = domain_shutdown_data, + }, + { + .service_id = "domain-panic", + .data = domain_panic_data, + }, +#ifdef CONFIG_HOTPLUG_CPU + { + .service_id = "dr-cpu", + .data = dr_cpu_data, + }, +#endif + { + .service_id = "pri", + .data = ds_pri_data, + }, + { + .service_id = "var-config", + .data = ds_var_data, + }, + { + .service_id = "var-config-backup", + .data = ds_var_data, + }, +}; + +static DEFINE_SPINLOCK(ds_lock); + +struct ds_info { + struct ldc_channel *lp; + u8 hs_state; +#define DS_HS_START 0x01 +#define DS_HS_DONE 0x02 + + u64 id; + + void *rcv_buf; + int rcv_buf_len; + + struct ds_cap_state *ds_states; + int num_ds_states; + + struct ds_info *next; +}; + +static struct ds_info *ds_info_list; + +static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle) +{ + unsigned int index = handle >> 32; + + if (index >= dp->num_ds_states) + return NULL; + return &dp->ds_states[index]; +} + +static struct 
ds_cap_state *find_cap_by_string(struct ds_info *dp, + const char *name) +{ + int i; + + for (i = 0; i < dp->num_ds_states; i++) { + if (strcmp(dp->ds_states[i].service_id, name)) + continue; + + return &dp->ds_states[i]; + } + return NULL; +} + +static int __ds_send(struct ldc_channel *lp, void *data, int len) +{ + int err, limit = 1000; + + err = -EINVAL; + while (limit-- > 0) { + err = ldc_write(lp, data, len); + if (!err || (err != -EAGAIN)) + break; + udelay(1); + } + + return err; +} + +static int ds_send(struct ldc_channel *lp, void *data, int len) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&ds_lock, flags); + err = __ds_send(lp, data, len); + spin_unlock_irqrestore(&ds_lock, flags); + + return err; +} + +struct ds_md_update_req { + __u64 req_num; +}; + +struct ds_md_update_res { + __u64 req_num; + __u32 result; +}; + +static void md_update_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len) +{ + struct ldc_channel *lp = dp->lp; + struct ds_data *dpkt = buf; + struct ds_md_update_req *rp; + struct { + struct ds_data data; + struct ds_md_update_res res; + } pkt; + + rp = (struct ds_md_update_req *) (dpkt + 1); + + printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id); + + mdesc_update(); + + memset(&pkt, 0, sizeof(pkt)); + pkt.data.tag.type = DS_DATA; + pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); + pkt.data.handle = cp->handle; + pkt.res.req_num = rp->req_num; + pkt.res.result = DS_OK; + + ds_send(lp, &pkt, sizeof(pkt)); +} + +struct ds_shutdown_req { + __u64 req_num; + __u32 ms_delay; +}; + +struct ds_shutdown_res { + __u64 req_num; + __u32 result; + char reason[1]; +}; + +static void domain_shutdown_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len) +{ + struct ldc_channel *lp = dp->lp; + struct ds_data *dpkt = buf; + struct ds_shutdown_req *rp; + struct { + struct ds_data data; + struct ds_shutdown_res res; + } pkt; + + rp = (struct ds_shutdown_req *) (dpkt + 1); + + printk(KERN_ALERT "ds-%llu: Shutdown request from " + "LDOM manager received.\n", dp->id); + + memset(&pkt, 0, sizeof(pkt)); + pkt.data.tag.type = DS_DATA; + pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); + pkt.data.handle = cp->handle; + pkt.res.req_num = rp->req_num; + pkt.res.result = DS_OK; + pkt.res.reason[0] = 0; + + ds_send(lp, &pkt, sizeof(pkt)); + + orderly_poweroff(true); +} + +struct ds_panic_req { + __u64 req_num; +}; + +struct ds_panic_res { + __u64 req_num; + __u32 result; + char reason[1]; +}; + +static void domain_panic_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len) +{ + struct ldc_channel *lp = dp->lp; + struct ds_data *dpkt = buf; + struct ds_panic_req *rp; + struct { + struct ds_data data; + struct ds_panic_res res; + } pkt; + + rp = (struct ds_panic_req *) (dpkt + 1); + + printk(KERN_ALERT "ds-%llu: Panic request from " + "LDOM manager received.\n", dp->id); + + memset(&pkt, 0, sizeof(pkt)); + pkt.data.tag.type = DS_DATA; + pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); + pkt.data.handle = cp->handle; + pkt.res.req_num = rp->req_num; + pkt.res.result = DS_OK; + pkt.res.reason[0] = 0; + + ds_send(lp, &pkt, sizeof(pkt)); + + panic("PANIC requested by LDOM manager."); +} + +#ifdef CONFIG_HOTPLUG_CPU +struct dr_cpu_tag { + __u64 req_num; + __u32 type; +#define DR_CPU_CONFIGURE 0x43 +#define DR_CPU_UNCONFIGURE 0x55 +#define DR_CPU_FORCE_UNCONFIGURE 0x46 +#define DR_CPU_STATUS 0x53 + +/* Responses */ +#define DR_CPU_OK 0x6f +#define DR_CPU_ERROR 0x65 + + __u32 
num_records; +}; + +struct dr_cpu_resp_entry { + __u32 cpu; + __u32 result; +#define DR_CPU_RES_OK 0x00 +#define DR_CPU_RES_FAILURE 0x01 +#define DR_CPU_RES_BLOCKED 0x02 +#define DR_CPU_RES_CPU_NOT_RESPONDING 0x03 +#define DR_CPU_RES_NOT_IN_MD 0x04 + + __u32 stat; +#define DR_CPU_STAT_NOT_PRESENT 0x00 +#define DR_CPU_STAT_UNCONFIGURED 0x01 +#define DR_CPU_STAT_CONFIGURED 0x02 + + __u32 str_off; +}; + +static void __dr_cpu_send_error(struct ds_info *dp, + struct ds_cap_state *cp, + struct ds_data *data) +{ + struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); + struct { + struct ds_data data; + struct dr_cpu_tag tag; + } pkt; + int msg_len; + + memset(&pkt, 0, sizeof(pkt)); + pkt.data.tag.type = DS_DATA; + pkt.data.handle = cp->handle; + pkt.tag.req_num = tag->req_num; + pkt.tag.type = DR_CPU_ERROR; + pkt.tag.num_records = 0; + + msg_len = (sizeof(struct ds_data) + + sizeof(struct dr_cpu_tag)); + + pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag); + + __ds_send(dp->lp, &pkt, msg_len); +} + +static void dr_cpu_send_error(struct ds_info *dp, + struct ds_cap_state *cp, + struct ds_data *data) +{ + unsigned long flags; + + spin_lock_irqsave(&ds_lock, flags); + __dr_cpu_send_error(dp, cp, data); + spin_unlock_irqrestore(&ds_lock, flags); +} + +#define CPU_SENTINEL 0xffffffff + +static void purge_dups(u32 *list, u32 num_ents) +{ + unsigned int i; + + for (i = 0; i < num_ents; i++) { + u32 cpu = list[i]; + unsigned int j; + + if (cpu == CPU_SENTINEL) + continue; + + for (j = i + 1; j < num_ents; j++) { + if (list[j] == cpu) + list[j] = CPU_SENTINEL; + } + } +} + +static int dr_cpu_size_response(int ncpus) +{ + return (sizeof(struct ds_data) + + sizeof(struct dr_cpu_tag) + + (sizeof(struct dr_cpu_resp_entry) * ncpus)); +} + +static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, + u64 handle, int resp_len, int ncpus, + cpumask_t *mask, u32 default_stat) +{ + struct dr_cpu_resp_entry *ent; + struct dr_cpu_tag *tag; + int i, cpu; + + tag = (struct dr_cpu_tag *) (resp + 1); + ent = (struct dr_cpu_resp_entry *) (tag + 1); + + resp->tag.type = DS_DATA; + resp->tag.len = resp_len - sizeof(struct ds_msg_tag); + resp->handle = handle; + tag->req_num = req_num; + tag->type = DR_CPU_OK; + tag->num_records = ncpus; + + i = 0; + for_each_cpu(cpu, mask) { + ent[i].cpu = cpu; + ent[i].result = DR_CPU_RES_OK; + ent[i].stat = default_stat; + i++; + } + BUG_ON(i != ncpus); +} + +static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, + u32 res, u32 stat) +{ + struct dr_cpu_resp_entry *ent; + struct dr_cpu_tag *tag; + int i; + + tag = (struct dr_cpu_tag *) (resp + 1); + ent = (struct dr_cpu_resp_entry *) (tag + 1); + + for (i = 0; i < ncpus; i++) { + if (ent[i].cpu != cpu) + continue; + ent[i].result = res; + ent[i].stat = stat; + break; + } +} + +static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, + u64 req_num, cpumask_t *mask) +{ + struct ds_data *resp; + int resp_len, ncpus, cpu; + unsigned long flags; + + ncpus = cpumask_weight(mask); + resp_len = dr_cpu_size_response(ncpus); + resp = kzalloc(resp_len, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + dr_cpu_init_response(resp, req_num, cp->handle, + resp_len, ncpus, mask, + DR_CPU_STAT_CONFIGURED); + + mdesc_populate_present_mask(mask); + mdesc_fill_in_cpu_data(mask); + + for_each_cpu(cpu, mask) { + int err; + + printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", + dp->id, cpu); + err = add_cpu(cpu); + if (err) { + __u32 res = DR_CPU_RES_FAILURE; + __u32 stat = DR_CPU_STAT_UNCONFIGURED; + + if 
(!cpu_present(cpu)) { + /* CPU not present in MD */ + res = DR_CPU_RES_NOT_IN_MD; + stat = DR_CPU_STAT_NOT_PRESENT; + } else if (err == -ENODEV) { + /* CPU did not call in successfully */ + res = DR_CPU_RES_CPU_NOT_RESPONDING; + } + + printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n", + dp->id, err); + dr_cpu_mark(resp, cpu, ncpus, res, stat); + } + } + + spin_lock_irqsave(&ds_lock, flags); + __ds_send(dp->lp, resp, resp_len); + spin_unlock_irqrestore(&ds_lock, flags); + + kfree(resp); + + /* Redistribute IRQs, taking into account the new cpus. */ + fixup_irqs(); + + return 0; +} + +static int dr_cpu_unconfigure(struct ds_info *dp, + struct ds_cap_state *cp, + u64 req_num, + cpumask_t *mask) +{ + struct ds_data *resp; + int resp_len, ncpus, cpu; + unsigned long flags; + + ncpus = cpumask_weight(mask); + resp_len = dr_cpu_size_response(ncpus); + resp = kzalloc(resp_len, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + dr_cpu_init_response(resp, req_num, cp->handle, + resp_len, ncpus, mask, + DR_CPU_STAT_UNCONFIGURED); + + for_each_cpu(cpu, mask) { + int err; + + printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", + dp->id, cpu); + err = remove_cpu(cpu); + if (err) + dr_cpu_mark(resp, cpu, ncpus, + DR_CPU_RES_FAILURE, + DR_CPU_STAT_CONFIGURED); + } + + spin_lock_irqsave(&ds_lock, flags); + __ds_send(dp->lp, resp, resp_len); + spin_unlock_irqrestore(&ds_lock, flags); + + kfree(resp); + + return 0; +} + +static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, + int len) +{ + struct ds_data *data = buf; + struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); + u32 *cpu_list = (u32 *) (tag + 1); + u64 req_num = tag->req_num; + cpumask_t mask; + unsigned int i; + int err; + + switch (tag->type) { + case DR_CPU_CONFIGURE: + case DR_CPU_UNCONFIGURE: + case DR_CPU_FORCE_UNCONFIGURE: + break; + + default: + dr_cpu_send_error(dp, cp, data); + return; + } + + purge_dups(cpu_list, tag->num_records); + + cpumask_clear(&mask); + for (i = 0; i < tag->num_records; i++) { + if (cpu_list[i] == CPU_SENTINEL) + continue; + + if (cpu_list[i] < nr_cpu_ids) + cpumask_set_cpu(cpu_list[i], &mask); + } + + if (tag->type == DR_CPU_CONFIGURE) + err = dr_cpu_configure(dp, cp, req_num, &mask); + else + err = dr_cpu_unconfigure(dp, cp, req_num, &mask); + + if (err) + dr_cpu_send_error(dp, cp, data); +} +#endif /* CONFIG_HOTPLUG_CPU */ + +struct ds_pri_msg { + __u64 req_num; + __u64 type; +#define DS_PRI_REQUEST 0x00 +#define DS_PRI_DATA 0x01 +#define DS_PRI_UPDATE 0x02 +}; + +static void ds_pri_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, int len) +{ + struct ds_data *dpkt = buf; + struct ds_pri_msg *rp; + + rp = (struct ds_pri_msg *) (dpkt + 1); + + printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n", + dp->id, rp->req_num, rp->type, len); +} + +struct ds_var_hdr { + __u32 type; +#define DS_VAR_SET_REQ 0x00 +#define DS_VAR_DELETE_REQ 0x01 +#define DS_VAR_SET_RESP 0x02 +#define DS_VAR_DELETE_RESP 0x03 +}; + +struct ds_var_set_msg { + struct ds_var_hdr hdr; + char name_and_value[]; +}; + +struct ds_var_delete_msg { + struct ds_var_hdr hdr; + char name[]; +}; + +struct ds_var_resp { + struct ds_var_hdr hdr; + __u32 result; +#define DS_VAR_SUCCESS 0x00 +#define DS_VAR_NO_SPACE 0x01 +#define DS_VAR_INVALID_VAR 0x02 +#define DS_VAR_INVALID_VAL 0x03 +#define DS_VAR_NOT_PRESENT 0x04 +}; + +static DEFINE_MUTEX(ds_var_mutex); +static int ds_var_doorbell; +static int ds_var_response; + +static void ds_var_data(struct ds_info *dp, + struct ds_cap_state *cp, + void *buf, 
int len)
+{
+	struct ds_data *dpkt = buf;
+	struct ds_var_resp *rp;
+
+	rp = (struct ds_var_resp *) (dpkt + 1);
+
+	if (rp->hdr.type != DS_VAR_SET_RESP &&
+	    rp->hdr.type != DS_VAR_DELETE_RESP)
+		return;
+
+	ds_var_response = rp->result;
+	wmb();
+	ds_var_doorbell = 1;
+}
+
+void ldom_set_var(const char *var, const char *value)
+{
+	struct ds_cap_state *cp;
+	struct ds_info *dp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ds_lock, flags);
+	cp = NULL;
+	for (dp = ds_info_list; dp; dp = dp->next) {
+		struct ds_cap_state *tmp;
+
+		tmp = find_cap_by_string(dp, "var-config");
+		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
+			cp = tmp;
+			break;
+		}
+	}
+	if (!cp) {
+		for (dp = ds_info_list; dp; dp = dp->next) {
+			struct ds_cap_state *tmp;
+
+			tmp = find_cap_by_string(dp, "var-config-backup");
+			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
+				cp = tmp;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ds_lock, flags);
+
+	if (cp) {
+		union {
+			struct {
+				struct ds_data data;
+				struct ds_var_set_msg msg;
+			} header;
+			char all[512];
+		} pkt;
+		char *base, *p;
+		int msg_len, loops;
+
+		if (strlen(var) + strlen(value) + 2 >
+		    sizeof(pkt) - sizeof(pkt.header)) {
+			printk(KERN_ERR PFX
+			       "content length %zu exceeds the maximum %lu, "
+			       "so could not set (%s) variable to (%s).\n",
+			       strlen(var) + strlen(value) + 2,
+			       sizeof(pkt) - sizeof(pkt.header), var, value);
+			return;
+		}
+
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.header.data.tag.type = DS_DATA;
+		pkt.header.data.handle = cp->handle;
+		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
+		base = p = &pkt.header.msg.name_and_value[0];
+		strcpy(p, var);
+		p += strlen(var) + 1;
+		strcpy(p, value);
+		p += strlen(value) + 1;
+
+		msg_len = (sizeof(struct ds_data) +
+			   sizeof(struct ds_var_set_msg) +
+			   (p - base));
+		msg_len = (msg_len + 3) & ~3;
+		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
+
+		mutex_lock(&ds_var_mutex);
+
+		spin_lock_irqsave(&ds_lock, flags);
+		ds_var_doorbell = 0;
+		ds_var_response = -1;
+
+		__ds_send(dp->lp, &pkt, msg_len);
+		spin_unlock_irqrestore(&ds_lock, flags);
+
+		loops = 1000;
+		while (ds_var_doorbell == 0) {
+			if (loops-- < 0)
+				break;
+			barrier();
+			udelay(100);
+		}
+
+		mutex_unlock(&ds_var_mutex);
+
+		if (ds_var_doorbell == 0 ||
+		    ds_var_response != DS_VAR_SUCCESS)
+			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
+			       "failed, response(%d).\n",
+			       dp->id, var, value,
+			       ds_var_response);
+	} else {
+		printk(KERN_ERR PFX "var-config not registered so "
+		       "could not set (%s) variable to (%s).\n",
+		       var, value);
+	}
+}
+
+static char full_boot_str[256] __attribute__((aligned(32)));
+static int reboot_data_supported;
+
+void ldom_reboot(const char *boot_command)
+{
+	/* Don't bother with any of this if the boot_command
+	 * is empty.
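+	 *
+	 * For example, ldom_reboot("") just triggers sun4v_mach_sir(),
+	 * while ldom_reboot("disk1") first records "boot disk1" -- via
+	 * the reboot-data hypervisor service when available, otherwise
+	 * via the "reboot-command" LDOM variable -- and then resets.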
+ */ + if (boot_command && strlen(boot_command)) { + unsigned long len; + + snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", + boot_command); + len = strlen(full_boot_str); + + if (reboot_data_supported) { + unsigned long ra = kimage_addr_to_ra(full_boot_str); + unsigned long hv_ret; + + hv_ret = sun4v_reboot_data_set(ra, len); + if (hv_ret != HV_EOK) + pr_err("SUN4V: Unable to set reboot data " + "hv_ret=%lu\n", hv_ret); + } else { + ldom_set_var("reboot-command", full_boot_str); + } + } + sun4v_mach_sir(); +} + +void ldom_power_off(void) +{ + sun4v_mach_exit(0); +} + +static void ds_conn_reset(struct ds_info *dp) +{ + printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n", + dp->id, __builtin_return_address(0)); +} + +static int register_services(struct ds_info *dp) +{ + struct ldc_channel *lp = dp->lp; + int i; + + for (i = 0; i < dp->num_ds_states; i++) { + struct { + struct ds_reg_req req; + u8 id_buf[256]; + } pbuf; + struct ds_cap_state *cp = &dp->ds_states[i]; + int err, msg_len; + u64 new_count; + + if (cp->state == CAP_STATE_REGISTERED) + continue; + + new_count = sched_clock() & 0xffffffff; + cp->handle = ((u64) i << 32) | new_count; + + msg_len = (sizeof(struct ds_reg_req) + + strlen(cp->service_id)); + + memset(&pbuf, 0, sizeof(pbuf)); + pbuf.req.tag.type = DS_REG_REQ; + pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag)); + pbuf.req.handle = cp->handle; + pbuf.req.major = 1; + pbuf.req.minor = 0; + strcpy(pbuf.id_buf, cp->service_id); + + err = __ds_send(lp, &pbuf, msg_len); + if (err > 0) + cp->state = CAP_STATE_REG_SENT; + } + return 0; +} + +static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt) +{ + + if (dp->hs_state == DS_HS_START) { + if (pkt->type != DS_INIT_ACK) + goto conn_reset; + + dp->hs_state = DS_HS_DONE; + + return register_services(dp); + } + + if (dp->hs_state != DS_HS_DONE) + goto conn_reset; + + if (pkt->type == DS_REG_ACK) { + struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt; + struct ds_cap_state *cp = find_cap(dp, ap->handle); + + if (!cp) { + printk(KERN_ERR "ds-%llu: REG ACK for unknown " + "handle %llx\n", dp->id, ap->handle); + return 0; + } + printk(KERN_INFO "ds-%llu: Registered %s service.\n", + dp->id, cp->service_id); + cp->state = CAP_STATE_REGISTERED; + } else if (pkt->type == DS_REG_NACK) { + struct ds_reg_nack *np = (struct ds_reg_nack *) pkt; + struct ds_cap_state *cp = find_cap(dp, np->handle); + + if (!cp) { + printk(KERN_ERR "ds-%llu: REG NACK for " + "unknown handle %llx\n", + dp->id, np->handle); + return 0; + } + cp->state = CAP_STATE_UNKNOWN; + } + + return 0; + +conn_reset: + ds_conn_reset(dp); + return -ECONNRESET; +} + +static void __send_ds_nack(struct ds_info *dp, u64 handle) +{ + struct ds_data_nack nack = { + .tag = { + .type = DS_NACK, + .len = (sizeof(struct ds_data_nack) - + sizeof(struct ds_msg_tag)), + }, + .handle = handle, + .result = DS_INV_HDL, + }; + + __ds_send(dp->lp, &nack, sizeof(nack)); +} + +static LIST_HEAD(ds_work_list); +static DECLARE_WAIT_QUEUE_HEAD(ds_wait); + +struct ds_queue_entry { + struct list_head list; + struct ds_info *dp; + int req_len; + int __pad; + u64 req[]; +}; + +static void process_ds_work(void) +{ + struct ds_queue_entry *qp, *tmp; + unsigned long flags; + LIST_HEAD(todo); + + spin_lock_irqsave(&ds_lock, flags); + list_splice_init(&ds_work_list, &todo); + spin_unlock_irqrestore(&ds_lock, flags); + + list_for_each_entry_safe(qp, tmp, &todo, list) { + struct ds_data *dpkt = (struct ds_data *) qp->req; + struct ds_info *dp = qp->dp; + struct ds_cap_state *cp = 
find_cap(dp, dpkt->handle); + int req_len = qp->req_len; + + if (!cp) { + printk(KERN_ERR "ds-%llu: Data for unknown " + "handle %llu\n", + dp->id, dpkt->handle); + + spin_lock_irqsave(&ds_lock, flags); + __send_ds_nack(dp, dpkt->handle); + spin_unlock_irqrestore(&ds_lock, flags); + } else { + cp->data(dp, cp, dpkt, req_len); + } + + list_del(&qp->list); + kfree(qp); + } +} + +static int ds_thread(void *__unused) +{ + DEFINE_WAIT(wait); + + while (1) { + prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE); + if (list_empty(&ds_work_list)) + schedule(); + finish_wait(&ds_wait, &wait); + + if (kthread_should_stop()) + break; + + process_ds_work(); + } + + return 0; +} + +static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len) +{ + struct ds_data *dpkt = (struct ds_data *) pkt; + struct ds_queue_entry *qp; + + qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); + if (!qp) { + __send_ds_nack(dp, dpkt->handle); + } else { + qp->dp = dp; + memcpy(&qp->req, pkt, len); + list_add_tail(&qp->list, &ds_work_list); + wake_up(&ds_wait); + } + return 0; +} + +static void ds_up(struct ds_info *dp) +{ + struct ldc_channel *lp = dp->lp; + struct ds_ver_req req; + int err; + + req.tag.type = DS_INIT_REQ; + req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag); + req.ver.major = 1; + req.ver.minor = 0; + + err = __ds_send(lp, &req, sizeof(req)); + if (err > 0) + dp->hs_state = DS_HS_START; +} + +static void ds_reset(struct ds_info *dp) +{ + int i; + + dp->hs_state = 0; + + for (i = 0; i < dp->num_ds_states; i++) { + struct ds_cap_state *cp = &dp->ds_states[i]; + + cp->state = CAP_STATE_UNKNOWN; + } +} + +static void ds_event(void *arg, int event) +{ + struct ds_info *dp = arg; + struct ldc_channel *lp = dp->lp; + unsigned long flags; + int err; + + spin_lock_irqsave(&ds_lock, flags); + + if (event == LDC_EVENT_UP) { + ds_up(dp); + spin_unlock_irqrestore(&ds_lock, flags); + return; + } + + if (event == LDC_EVENT_RESET) { + ds_reset(dp); + spin_unlock_irqrestore(&ds_lock, flags); + return; + } + + if (event != LDC_EVENT_DATA_READY) { + printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n", + dp->id, event); + spin_unlock_irqrestore(&ds_lock, flags); + return; + } + + err = 0; + while (1) { + struct ds_msg_tag *tag; + + err = ldc_read(lp, dp->rcv_buf, sizeof(*tag)); + + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + ds_conn_reset(dp); + break; + } + if (err == 0) + break; + + tag = dp->rcv_buf; + err = ldc_read(lp, tag + 1, tag->len); + + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + ds_conn_reset(dp); + break; + } + if (err < tag->len) + break; + + if (tag->type < DS_DATA) + err = ds_handshake(dp, dp->rcv_buf); + else + err = ds_data(dp, dp->rcv_buf, + sizeof(*tag) + err); + if (err == -ECONNRESET) + break; + } + + spin_unlock_irqrestore(&ds_lock, flags); +} + +static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + static int ds_version_printed; + struct ldc_channel_config ds_cfg = { + .event = ds_event, + .mtu = 4096, + .mode = LDC_MODE_STREAM, + }; + struct mdesc_handle *hp; + struct ldc_channel *lp; + struct ds_info *dp; + const u64 *val; + int err, i; + + if (ds_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + dp = kzalloc(sizeof(*dp), GFP_KERNEL); + err = -ENOMEM; + if (!dp) + goto out_err; + + hp = mdesc_grab(); + val = mdesc_get_property(hp, vdev->mp, "id", NULL); + if (val) + dp->id = *val; + mdesc_release(hp); + + dp->rcv_buf = kzalloc(4096, GFP_KERNEL); + if (!dp->rcv_buf) + goto out_free_dp; + + dp->rcv_buf_len = 4096; 
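+	/* Note: this buffer size matches the LDC mtu requested in ds_cfg
+	 * above, so a maximal 4096-byte frame should always fit in rcv_buf.
+	 */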
+ + dp->ds_states = kmemdup(ds_states_template, + sizeof(ds_states_template), GFP_KERNEL); + if (!dp->ds_states) + goto out_free_rcv_buf; + + dp->num_ds_states = ARRAY_SIZE(ds_states_template); + + for (i = 0; i < dp->num_ds_states; i++) + dp->ds_states[i].handle = ((u64)i << 32); + + ds_cfg.tx_irq = vdev->tx_irq; + ds_cfg.rx_irq = vdev->rx_irq; + + lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS"); + if (IS_ERR(lp)) { + err = PTR_ERR(lp); + goto out_free_ds_states; + } + dp->lp = lp; + + err = ldc_bind(lp); + if (err) + goto out_free_ldc; + + spin_lock_irq(&ds_lock); + dp->next = ds_info_list; + ds_info_list = dp; + spin_unlock_irq(&ds_lock); + + return err; + +out_free_ldc: + ldc_free(dp->lp); + +out_free_ds_states: + kfree(dp->ds_states); + +out_free_rcv_buf: + kfree(dp->rcv_buf); + +out_free_dp: + kfree(dp); + +out_err: + return err; +} + +static const struct vio_device_id ds_match[] = { + { + .type = "domain-services-port", + }, + {}, +}; + +static struct vio_driver ds_driver = { + .id_table = ds_match, + .probe = ds_probe, + .name = "ds", +}; + +static int __init ds_init(void) +{ + unsigned long hv_ret, major, minor; + + if (tlb_type == hypervisor) { + hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); + if (hv_ret == HV_EOK) { + pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", + major, minor); + reboot_data_supported = 1; + } + } + kthread_run(ds_thread, NULL, "kldomd"); + + return vio_register_driver(&ds_driver); +} + +fs_initcall(ds_init); diff --git a/arch/sparc/kernel/dtlb_miss.S b/arch/sparc/kernel/dtlb_miss.S new file mode 100644 index 000000000..fb9c78843 --- /dev/null +++ b/arch/sparc/kernel/dtlb_miss.S @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* DTLB ** ICACHE line 1: Context 0 check and TSB load */ + ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer + ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET + srlx %g6, 48, %g5 ! Get context + sllx %g6, 22, %g6 ! Zero out context + brz,pn %g5, kvmap_dtlb ! Context 0 processing + srlx %g6, 22, %g6 ! Delay slot + TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry + cmp %g4, %g6 ! Compare TAG + +/* DTLB ** ICACHE line 2: TSB compare and TLB load */ + bne,pn %xcc, tsb_miss_dtlb ! Miss + mov FAULT_CODE_DTLB, %g3 + stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB + retry ! Trap done + nop + nop + nop + nop + +/* DTLB ** ICACHE line 3: */ + nop + nop + nop + nop + nop + nop + nop + nop + +/* DTLB ** ICACHE line 4: */ + nop + nop + nop + nop + nop + nop + nop + nop diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S new file mode 100644 index 000000000..9f945771b --- /dev/null +++ b/arch/sparc/kernel/dtlb_prot.S @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * dtlb_prot.S: DTLB protection trap strategy. + * This is included directly into the trap table. + * + * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) + * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +/* Ways we can get here: + * + * [TL == 0] 1) User stores to readonly pages. + * [TL == 0] 2) Nucleus stores to user readonly pages. + * [TL > 0] 3) Nucleus stores to user readonly stack frame. + */ + +/* PROT ** ICACHE line 1: User DTLB protection trap */ + mov TLB_SFSR, %g1 + stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit + membar #Sync ! Synchronize stores + rdpr %pstate, %g5 ! Move into alt-globals + wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate + rdpr %tl, %g1 ! Need a winfixup? + cmp %g1, 1 ! Trap level >1? + mov TLB_TAG_ACCESS, %g4 ! 
For reload of vaddr
+
+/* PROT ** ICACHE line 2: More real fault processing */
+ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+ srlx %g5, PAGE_SHIFT, %g5
+ sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
+ bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
+ mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
+ nop
+ nop
+
+/* PROT ** ICACHE line 3: Unused... */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+/* PROT ** ICACHE line 4: Unused... */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
new file mode 100644
index 000000000..264b18647
--- /dev/null
+++ b/arch/sparc/kernel/ebus.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ebus.c: EBUS DMA library code.
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/ebus_dma.h>
+#include <asm/io.h>
+
+#define EBDMA_CSR 0x00UL /* Control/Status */
+#define EBDMA_ADDR 0x04UL /* DMA Address */
+#define EBDMA_COUNT 0x08UL /* DMA Count */
+
+#define EBDMA_CSR_INT_PEND 0x00000001
+#define EBDMA_CSR_ERR_PEND 0x00000002
+#define EBDMA_CSR_DRAIN 0x00000004
+#define EBDMA_CSR_INT_EN 0x00000010
+#define EBDMA_CSR_RESET 0x00000080
+#define EBDMA_CSR_WRITE 0x00000100
+#define EBDMA_CSR_EN_DMA 0x00000200
+#define EBDMA_CSR_CYC_PEND 0x00000400
+#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
+#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
+#define EBDMA_CSR_EN_CNT 0x00002000
+#define EBDMA_CSR_TC 0x00004000
+#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
+#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
+#define EBDMA_CSR_BURST_SZ_1 0x00080000
+#define EBDMA_CSR_BURST_SZ_4 0x00000000
+#define EBDMA_CSR_BURST_SZ_8 0x00040000
+#define EBDMA_CSR_BURST_SZ_16 0x000c0000
+#define EBDMA_CSR_DIAG_EN 0x00100000
+#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
+#define EBDMA_CSR_TCI_DIS 0x00800000
+#define EBDMA_CSR_EN_NEXT 0x01000000
+#define EBDMA_CSR_DMA_ON 0x02000000
+#define EBDMA_CSR_A_LOADED 0x04000000
+#define EBDMA_CSR_NA_LOADED 0x08000000
+#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
+
+#define EBUS_DMA_RESET_TIMEOUT 10000
+
+static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
+{
+ int i;
+ u32 val = 0;
+
+ writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
+ udelay(1);
+
+ if (no_drain)
+ return;
+
+ for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
+ val = readl(p->regs + EBDMA_CSR);
+
+ if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
+ break;
+ udelay(10);
+ }
+}
+
+static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
+{
+ struct ebus_dma_info *p = dev_id;
+ unsigned long flags;
+ u32 csr = 0;
+
+ spin_lock_irqsave(&p->lock, flags);
+ csr = readl(p->regs + EBDMA_CSR);
+ writel(csr, p->regs + EBDMA_CSR);
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (csr & EBDMA_CSR_ERR_PEND) {
+ printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
+ p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
+ return IRQ_HANDLED;
+ } else if (csr & EBDMA_CSR_INT_PEND) {
+ p->callback(p,
+ (csr & EBDMA_CSR_TC) ?
+ EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE, + p->client_cookie); + return IRQ_HANDLED; + } + + return IRQ_NONE; + +} + +int ebus_dma_register(struct ebus_dma_info *p) +{ + u32 csr; + + if (!p->regs) + return -EINVAL; + if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER | + EBUS_DMA_FLAG_TCI_DISABLE)) + return -EINVAL; + if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback) + return -EINVAL; + if (!strlen(p->name)) + return -EINVAL; + + __ebus_dma_reset(p, 1); + + csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT; + + if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE) + csr |= EBDMA_CSR_TCI_DIS; + + writel(csr, p->regs + EBDMA_CSR); + + return 0; +} +EXPORT_SYMBOL(ebus_dma_register); + +int ebus_dma_irq_enable(struct ebus_dma_info *p, int on) +{ + unsigned long flags; + u32 csr; + + if (on) { + if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) { + if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p)) + return -EBUSY; + } + + spin_lock_irqsave(&p->lock, flags); + csr = readl(p->regs + EBDMA_CSR); + csr |= EBDMA_CSR_INT_EN; + writel(csr, p->regs + EBDMA_CSR); + spin_unlock_irqrestore(&p->lock, flags); + } else { + spin_lock_irqsave(&p->lock, flags); + csr = readl(p->regs + EBDMA_CSR); + csr &= ~EBDMA_CSR_INT_EN; + writel(csr, p->regs + EBDMA_CSR); + spin_unlock_irqrestore(&p->lock, flags); + + if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) { + free_irq(p->irq, p); + } + } + + return 0; +} +EXPORT_SYMBOL(ebus_dma_irq_enable); + +void ebus_dma_unregister(struct ebus_dma_info *p) +{ + unsigned long flags; + u32 csr; + int irq_on = 0; + + spin_lock_irqsave(&p->lock, flags); + csr = readl(p->regs + EBDMA_CSR); + if (csr & EBDMA_CSR_INT_EN) { + csr &= ~EBDMA_CSR_INT_EN; + writel(csr, p->regs + EBDMA_CSR); + irq_on = 1; + } + spin_unlock_irqrestore(&p->lock, flags); + + if (irq_on) + free_irq(p->irq, p); +} +EXPORT_SYMBOL(ebus_dma_unregister); + +int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len) +{ + unsigned long flags; + u32 csr; + int err; + + if (len >= (1 << 24)) + return -EINVAL; + + spin_lock_irqsave(&p->lock, flags); + csr = readl(p->regs + EBDMA_CSR); + err = -EINVAL; + if (!(csr & EBDMA_CSR_EN_DMA)) + goto out; + err = -EBUSY; + if (csr & EBDMA_CSR_NA_LOADED) + goto out; + + writel(len, p->regs + EBDMA_COUNT); + writel(bus_addr, p->regs + EBDMA_ADDR); + err = 0; + +out: + spin_unlock_irqrestore(&p->lock, flags); + + return err; +} +EXPORT_SYMBOL(ebus_dma_request); + +void ebus_dma_prepare(struct ebus_dma_info *p, int write) +{ + unsigned long flags; + u32 csr; + + spin_lock_irqsave(&p->lock, flags); + __ebus_dma_reset(p, 0); + + csr = (EBDMA_CSR_INT_EN | + EBDMA_CSR_EN_CNT | + EBDMA_CSR_BURST_SZ_16 | + EBDMA_CSR_EN_NEXT); + + if (write) + csr |= EBDMA_CSR_WRITE; + if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE) + csr |= EBDMA_CSR_TCI_DIS; + + writel(csr, p->regs + EBDMA_CSR); + + spin_unlock_irqrestore(&p->lock, flags); +} +EXPORT_SYMBOL(ebus_dma_prepare); + +unsigned int ebus_dma_residue(struct ebus_dma_info *p) +{ + return readl(p->regs + EBDMA_COUNT); +} +EXPORT_SYMBOL(ebus_dma_residue); + +unsigned int ebus_dma_addr(struct ebus_dma_info *p) +{ + return readl(p->regs + EBDMA_ADDR); +} +EXPORT_SYMBOL(ebus_dma_addr); + +void ebus_dma_enable(struct ebus_dma_info *p, int on) +{ + unsigned long flags; + u32 orig_csr, csr; + + spin_lock_irqsave(&p->lock, flags); + orig_csr = csr = readl(p->regs + EBDMA_CSR); + if (on) + csr |= EBDMA_CSR_EN_DMA; + else + csr &= ~EBDMA_CSR_EN_DMA; + if ((orig_csr & EBDMA_CSR_EN_DMA) != + (csr & EBDMA_CSR_EN_DMA)) + writel(csr, 
p->regs + EBDMA_CSR); + spin_unlock_irqrestore(&p->lock, flags); +} +EXPORT_SYMBOL(ebus_dma_enable); diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S new file mode 100644 index 000000000..a269ad2fe --- /dev/null +++ b/arch/sparc/kernel/entry.S @@ -0,0 +1,1355 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points. + * + * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define curptr g6 + +/* These are just handy. */ +#define _SV save %sp, -STACKFRAME_SZ, %sp +#define _RS restore + +#define FLUSH_ALL_KERNEL_WINDOWS \ + _SV; _SV; _SV; _SV; _SV; _SV; _SV; \ + _RS; _RS; _RS; _RS; _RS; _RS; _RS; + + .text + +#ifdef CONFIG_KGDB + .align 4 + .globl arch_kgdb_breakpoint + .type arch_kgdb_breakpoint,#function +arch_kgdb_breakpoint: + ta 0x7d + retl + nop + .size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint +#endif + +#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) + .align 4 + .globl floppy_hardint +floppy_hardint: + /* + * This code cannot touch registers %l0 %l1 and %l2 + * because SAVE_ALL depends on their values. It depends + * on %l3 also, but we regenerate it before a call. + * Other registers are: + * %l3 -- base address of fdc registers + * %l4 -- pdma_vaddr + * %l5 -- scratch for ld/st address + * %l6 -- pdma_size + * %l7 -- scratch [floppy byte, ld/st address, aux. data] + */ + + /* Do we have work to do? */ + sethi %hi(doing_pdma), %l7 + ld [%l7 + %lo(doing_pdma)], %l7 + cmp %l7, 0 + be floppy_dosoftint + nop + + /* Load fdc register base */ + sethi %hi(fdc_status), %l3 + ld [%l3 + %lo(fdc_status)], %l3 + + /* Setup register addresses */ + sethi %hi(pdma_vaddr), %l5 ! transfer buffer + ld [%l5 + %lo(pdma_vaddr)], %l4 + sethi %hi(pdma_size), %l5 ! bytes to go + ld [%l5 + %lo(pdma_size)], %l6 +next_byte: + ldub [%l3], %l7 + + andcc %l7, 0x80, %g0 ! Does fifo still have data + bz floppy_fifo_emptied ! fifo has been emptied... + andcc %l7, 0x20, %g0 ! in non-dma mode still? + bz floppy_overrun ! nope, overrun + andcc %l7, 0x40, %g0 ! 0=write 1=read + bz floppy_write + sub %l6, 0x1, %l6 + + /* Ok, actually read this byte */ + ldub [%l3 + 1], %l7 + orcc %g0, %l6, %g0 + stb %l7, [%l4] + bne next_byte + add %l4, 0x1, %l4 + + b floppy_tdone + nop + +floppy_write: + /* Ok, actually write this byte */ + ldub [%l4], %l7 + orcc %g0, %l6, %g0 + stb %l7, [%l3 + 1] + bne next_byte + add %l4, 0x1, %l4 + + /* fall through... */ +floppy_tdone: + sethi %hi(pdma_vaddr), %l5 + st %l4, [%l5 + %lo(pdma_vaddr)] + sethi %hi(pdma_size), %l5 + st %l6, [%l5 + %lo(pdma_size)] + /* Flip terminal count pin */ + set auxio_register, %l7 + ld [%l7], %l7 + + ldub [%l7], %l5 + + or %l5, 0xc2, %l5 + stb %l5, [%l7] + andn %l5, 0x02, %l5 + +2: + /* Kill some time so the bits set */ + WRITE_PAUSE + WRITE_PAUSE + + stb %l5, [%l7] + + /* Prevent recursion */ + sethi %hi(doing_pdma), %l7 + b floppy_dosoftint + st %g0, [%l7 + %lo(doing_pdma)] + + /* We emptied the FIFO, but we haven't read everything + * as of yet. 
Store the current transfer address and
+ * bytes left to read so we can continue when the next
+ * fast IRQ comes in.
+ */
+floppy_fifo_emptied:
+ sethi %hi(pdma_vaddr), %l5
+ st %l4, [%l5 + %lo(pdma_vaddr)]
+ sethi %hi(pdma_size), %l7
+ st %l6, [%l7 + %lo(pdma_size)]
+
+ /* Restore condition codes */
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %l1
+ rett %l2
+
+floppy_overrun:
+ sethi %hi(pdma_vaddr), %l5
+ st %l4, [%l5 + %lo(pdma_vaddr)]
+ sethi %hi(pdma_size), %l5
+ st %l6, [%l5 + %lo(pdma_size)]
+ /* Prevent recursion */
+ sethi %hi(doing_pdma), %l7
+ st %g0, [%l7 + %lo(doing_pdma)]
+
+ /* fall through... */
+floppy_dosoftint:
+ rd %wim, %l3
+ SAVE_ALL
+
+ /* Set all IRQs off. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov 11, %o0 ! floppy irq level (unused anyway)
+ mov %g0, %o1 ! devid is not used in fast interrupts
+ call sparc_floppy_irq
+ add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
+
+ RESTORE_ALL
+
+#endif /* (CONFIG_BLK_DEV_FD) */
+
+ /* Bad trap handler */
+ .globl bad_trap_handler
+bad_trap_handler:
+ SAVE_ALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ add %sp, STACKFRAME_SZ, %o0 ! pt_regs
+ call do_hw_interrupt
+ mov %l7, %o1 ! trap number
+
+ RESTORE_ALL
+
+/* For now all IRQs not registered get sent here. handler_irq() will
+ * see if a routine is registered to handle this interrupt and if not
+ * it will say so on the console.
+ */
+
+ .align 4
+ .globl real_irq_entry, patch_handler_irq
+real_irq_entry:
+ SAVE_ALL
+
+#ifdef CONFIG_SMP
+ .globl patchme_maybe_smp_msg
+
+ cmp %l7, 11
+patchme_maybe_smp_msg:
+ bgu maybe_smp4m_msg
+ nop
+#endif
+
+real_irq_continue:
+ or %l0, PSR_PIL, %g2
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+ wr %g2, PSR_ET, %psr
+ WRITE_PAUSE
+ mov %l7, %o0 ! irq level
+patch_handler_irq:
+ call handler_irq
+ add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
+ or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq
+ wr %g2, PSR_ET, %psr ! keep ET up
+ WRITE_PAUSE
+
+ RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ /* SMP per-cpu ticker interrupts are handled specially. */
+smp4m_ticker:
+ bne real_irq_continue+4
+ or %l0, PSR_PIL, %g2
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+ wr %g2, PSR_ET, %psr
+ WRITE_PAUSE
+ call smp4m_percpu_timer_interrupt
+ add %sp, STACKFRAME_SZ, %o0
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+ RESTORE_ALL
+
+#define GET_PROCESSOR4M_ID(reg) \
+ rd %tbr, %reg; \
+ srl %reg, 12, %reg; \
+ and %reg, 3, %reg;
+
+ /* Here is where we check for possible SMP IPI passed to us
+ * on some level other than 15 which is the NMI and only used
+ * for cross calls. That has a separate entry point below.
+ *
+ * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
+ */
+maybe_smp4m_msg:
+ GET_PROCESSOR4M_ID(o3)
+ sethi %hi(sun4m_irq_percpu), %l5
+ sll %o3, 2, %o3
+ or %l5, %lo(sun4m_irq_percpu), %o5
+ sethi %hi(0x70000000), %o2 ! Check all soft-IRQs
+ ld [%o5 + %o3], %o1
+ ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
+ andcc %o3, %o2, %g0
+ be,a smp4m_ticker
+ cmp %l7, 14
+ /* Soft-IRQ IPI */
+ st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x70000000
+ WRITE_PAUSE
+ ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
+ WRITE_PAUSE
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+ srl %o3, 28, %o2 !
shift for simpler checks below +maybe_smp4m_msg_check_single: + andcc %o2, 0x1, %g0 + beq,a maybe_smp4m_msg_check_mask + andcc %o2, 0x2, %g0 + call smp_call_function_single_interrupt + nop + andcc %o2, 0x2, %g0 +maybe_smp4m_msg_check_mask: + beq,a maybe_smp4m_msg_check_resched + andcc %o2, 0x4, %g0 + call smp_call_function_interrupt + nop + andcc %o2, 0x4, %g0 +maybe_smp4m_msg_check_resched: + /* rescheduling is done in RESTORE_ALL regardless, but incr stats */ + beq,a maybe_smp4m_msg_out + nop + call smp_resched_interrupt + nop +maybe_smp4m_msg_out: + RESTORE_ALL + + .align 4 + .globl linux_trap_ipi15_sun4m +linux_trap_ipi15_sun4m: + SAVE_ALL + sethi %hi(0x80000000), %o2 + GET_PROCESSOR4M_ID(o0) + sethi %hi(sun4m_irq_percpu), %l5 + or %l5, %lo(sun4m_irq_percpu), %o5 + sll %o0, 2, %o0 + ld [%o5 + %o0], %o5 + ld [%o5 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending + andcc %o3, %o2, %g0 + be sun4m_nmi_error ! Must be an NMI async memory error + st %o2, [%o5 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x80000000 + WRITE_PAUSE + ld [%o5 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending + WRITE_PAUSE + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call smp4m_cross_call_irq + nop + b ret_trap_lockless_ipi + clr %l6 + + .globl smp4d_ticker + /* SMP per-cpu ticker interrupts are handled specially. */ +smp4d_ticker: + SAVE_ALL + or %l0, PSR_PIL, %g2 + sethi %hi(CC_ICLR), %o0 + sethi %hi(1 << 14), %o1 + or %o0, %lo(CC_ICLR), %o0 + stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */ + wr %g2, 0x0, %psr + WRITE_PAUSE + wr %g2, PSR_ET, %psr + WRITE_PAUSE + call smp4d_percpu_timer_interrupt + add %sp, STACKFRAME_SZ, %o0 + wr %l0, PSR_ET, %psr + WRITE_PAUSE + RESTORE_ALL + + .align 4 + .globl linux_trap_ipi15_sun4d +linux_trap_ipi15_sun4d: + SAVE_ALL + sethi %hi(CC_BASE), %o4 + sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2 + or %o4, (CC_EREG - CC_BASE), %o0 + ldda [%o0] ASI_M_MXCC, %o0 + andcc %o0, %o2, %g0 + bne 1f + sethi %hi(BB_STAT2), %o2 + lduba [%o2] ASI_M_CTL, %o2 + andcc %o2, BB_STAT2_MASK, %g0 + bne 2f + or %o4, (CC_ICLR - CC_BASE), %o0 + sethi %hi(1 << 15), %o1 + stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */ + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call smp4d_cross_call_irq + nop + b ret_trap_lockless_ipi + clr %l6 + +1: /* MXCC error */ +2: /* BB error */ + /* Disable PIL 15 */ + set CC_IMSK, %l4 + lduha [%l4] ASI_M_MXCC, %l5 + sethi %hi(1 << 15), %l7 + or %l5, %l7, %l5 + stha %l5, [%l4] ASI_M_MXCC + /* FIXME */ +1: b,a 1b + + .globl smpleon_ipi + .extern leon_ipi_interrupt + /* SMP per-cpu IPI interrupts are handled specially. */ +smpleon_ipi: + SAVE_ALL + or %l0, PSR_PIL, %g2 + wr %g2, 0x0, %psr + WRITE_PAUSE + wr %g2, PSR_ET, %psr + WRITE_PAUSE + call leonsmp_ipi_interrupt + add %sp, STACKFRAME_SZ, %o1 ! pt_regs + wr %l0, PSR_ET, %psr + WRITE_PAUSE + RESTORE_ALL + + .align 4 + .globl linux_trap_ipi15_leon +linux_trap_ipi15_leon: + SAVE_ALL + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call leon_cross_call_irq + nop + b ret_trap_lockless_ipi + clr %l6 + +#endif /* CONFIG_SMP */ + + /* This routine handles illegal instructions and privileged + * instruction attempts from user code. + */ + .align 4 + .globl bad_instruction +bad_instruction: + sethi %hi(0xc1f80000), %l4 + ld [%l1], %l5 + sethi %hi(0x81d80000), %l7 + and %l5, %l4, %l5 + cmp %l5, %l7 + be 1f + SAVE_ALL + + wr %l0, PSR_ET, %psr ! 
re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call do_illegal_instruction + mov %l0, %o3 + + RESTORE_ALL + +1: /* unimplemented flush - just skip */ + jmpl %l2, %g0 + rett %l2 + 4 + + .align 4 + .globl priv_instruction +priv_instruction: + SAVE_ALL + + wr %l0, PSR_ET, %psr + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call do_priv_instruction + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles unaligned data accesses. */ + .align 4 + .globl mna_handler +mna_handler: + andcc %l0, PSR_PS, %g0 + be mna_fromuser + nop + + SAVE_ALL + + wr %l0, PSR_ET, %psr + WRITE_PAUSE + + ld [%l1], %o1 + call kernel_unaligned_trap + add %sp, STACKFRAME_SZ, %o0 + + RESTORE_ALL + +mna_fromuser: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + ld [%l1], %o1 + call user_unaligned_trap + add %sp, STACKFRAME_SZ, %o0 + + RESTORE_ALL + + /* This routine handles floating point disabled traps. */ + .align 4 + .globl fpd_trap_handler +fpd_trap_handler: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call do_fpd_trap + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Floating Point Exceptions. */ + .align 4 + .globl fpe_trap_handler +fpe_trap_handler: + set fpsave_magic, %l5 + cmp %l1, %l5 + be 1f + sethi %hi(fpsave), %l5 + or %l5, %lo(fpsave), %l5 + cmp %l1, %l5 + bne 2f + sethi %hi(fpsave_catch2), %l5 + or %l5, %lo(fpsave_catch2), %l5 + wr %l0, 0x0, %psr + WRITE_PAUSE + jmp %l5 + rett %l5 + 4 +1: + sethi %hi(fpsave_catch), %l5 + or %l5, %lo(fpsave_catch), %l5 + wr %l0, 0x0, %psr + WRITE_PAUSE + jmp %l5 + rett %l5 + 4 + +2: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call do_fpe_trap + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Tag Overflow Exceptions. */ + .align 4 + .globl do_tag_overflow +do_tag_overflow: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_tag_overflow + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Watchpoint Exceptions. */ + .align 4 + .globl do_watchpoint +do_watchpoint: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_watchpoint + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Register Access Exceptions. */ + .align 4 + .globl do_reg_access +do_reg_access: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_reg_access + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Co-Processor Disabled Exceptions. */ + .align 4 + .globl do_cp_disabled +do_cp_disabled: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_cp_disabled + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Co-Processor Exceptions. */ + .align 4 + .globl do_cp_exception +do_cp_exception: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_cp_exception + mov %l0, %o3 + + RESTORE_ALL + + /* This routine handles Hardware Divide By Zero Exceptions. */ + .align 4 + .globl do_hw_divzero +do_hw_divzero: + SAVE_ALL + + wr %l0, PSR_ET, %psr ! 
re-enable traps + WRITE_PAUSE + + add %sp, STACKFRAME_SZ, %o0 + mov %l1, %o1 + mov %l2, %o2 + call handle_hw_divzero + mov %l0, %o3 + + RESTORE_ALL + + .align 4 + .globl do_flush_windows +do_flush_windows: + SAVE_ALL + + wr %l0, PSR_ET, %psr + WRITE_PAUSE + + andcc %l0, PSR_PS, %g0 + bne dfw_kernel + nop + + call flush_user_windows + nop + + /* Advance over the trap instruction. */ + ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 + add %l1, 0x4, %l2 + st %l1, [%sp + STACKFRAME_SZ + PT_PC] + st %l2, [%sp + STACKFRAME_SZ + PT_NPC] + + RESTORE_ALL + + .globl flush_patch_one + + /* We get these for debugging routines using __builtin_return_address() */ +dfw_kernel: +flush_patch_one: + FLUSH_ALL_KERNEL_WINDOWS + + /* Advance over the trap instruction. */ + ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 + add %l1, 0x4, %l2 + st %l1, [%sp + STACKFRAME_SZ + PT_PC] + st %l2, [%sp + STACKFRAME_SZ + PT_NPC] + + RESTORE_ALL + + /* The getcc software trap. The user wants the condition codes from + * the %psr in register %g1. + */ + + .align 4 + .globl getcc_trap_handler +getcc_trap_handler: + srl %l0, 20, %g1 ! give user + and %g1, 0xf, %g1 ! only ICC bits in %psr + jmp %l2 ! advance over trap instruction + rett %l2 + 0x4 ! like this... + + /* The setcc software trap. The user has condition codes in %g1 + * that it would like placed in the %psr. Be careful not to flip + * any unintentional bits! + */ + + .align 4 + .globl setcc_trap_handler +setcc_trap_handler: + sll %g1, 0x14, %l4 + set PSR_ICC, %l5 + andn %l0, %l5, %l0 ! clear ICC bits in %psr + and %l4, %l5, %l4 ! clear non-ICC bits in user value + or %l4, %l0, %l4 ! or them in... mix mix mix + + wr %l4, 0x0, %psr ! set new %psr + WRITE_PAUSE ! TI scumbags... + + jmp %l2 ! advance over trap instruction + rett %l2 + 0x4 ! like this... + +sun4m_nmi_error: + /* NMI async memory error handling. */ + sethi %hi(0x80000000), %l4 + sethi %hi(sun4m_irq_global), %o5 + ld [%o5 + %lo(sun4m_irq_global)], %l5 + st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000 + WRITE_PAUSE + ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending + WRITE_PAUSE + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call sun4m_nmi + nop + st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000 + WRITE_PAUSE + ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending + WRITE_PAUSE + RESTORE_ALL + +#ifndef CONFIG_SMP + .align 4 + .globl linux_trap_ipi15_sun4m +linux_trap_ipi15_sun4m: + SAVE_ALL + + ba sun4m_nmi_error + nop +#endif /* CONFIG_SMP */ + + .align 4 + .globl srmmu_fault +srmmu_fault: + mov 0x400, %l5 + mov 0x300, %l4 + +LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first +SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first + +LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last +SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last + + andn %l6, 0xfff, %l6 + srl %l5, 6, %l5 ! and encode all info into l7 + + and %l5, 2, %l5 + or %l5, %l6, %l6 + + or %l6, %l7, %l7 ! l7 = [addr,write,txtfault] + + SAVE_ALL + + mov %l7, %o1 + mov %l7, %o2 + and %o1, 1, %o1 ! arg2 = text_faultp + mov %l7, %o3 + and %o2, 2, %o2 ! arg3 = writep + andn %o3, 0xfff, %o3 ! arg4 = faulting address + + wr %l0, PSR_ET, %psr + WRITE_PAUSE + + call do_sparc_fault + add %sp, STACKFRAME_SZ, %o0 ! 
arg1 = pt_regs ptr + + RESTORE_ALL + + .align 4 +sunos_execv: + .globl sunos_execv + b sys_execve + clr %i2 + + .align 4 + .globl sys_sigstack +sys_sigstack: + mov %o7, %l5 + mov %fp, %o2 + call do_sys_sigstack + mov %l5, %o7 + + .align 4 + .globl sys_sigreturn +sys_sigreturn: + call do_sigreturn + add %sp, STACKFRAME_SZ, %o0 + + ld [%curptr + TI_FLAGS], %l5 + andcc %l5, _TIF_SYSCALL_TRACE, %g0 + be 1f + nop + + call syscall_trace + mov 1, %o1 + +1: + /* We don't want to muck with user registers like a + * normal syscall, just return. + */ + RESTORE_ALL + + .align 4 + .globl sys_rt_sigreturn +sys_rt_sigreturn: + call do_rt_sigreturn + add %sp, STACKFRAME_SZ, %o0 + + ld [%curptr + TI_FLAGS], %l5 + andcc %l5, _TIF_SYSCALL_TRACE, %g0 + be 1f + nop + + add %sp, STACKFRAME_SZ, %o0 + call syscall_trace + mov 1, %o1 + +1: + /* We are returning to a signal handler. */ + RESTORE_ALL + + /* Now that we have a real sys_clone, sys_fork() is + * implemented in terms of it. Our _real_ implementation + * of SunOS vfork() will use sys_vfork(). + * + * XXX These three should be consolidated into mostly shared + * XXX code just like on sparc64... -DaveM + */ + .align 4 + .globl sys_fork, flush_patch_two +sys_fork: + mov %o7, %l5 +flush_patch_two: + FLUSH_ALL_KERNEL_WINDOWS; + ld [%curptr + TI_TASK], %o4 + rd %psr, %g4 + WRITE_PAUSE + rd %wim, %g5 + WRITE_PAUSE + std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] + add %sp, STACKFRAME_SZ, %o0 + call sparc_fork + mov %l5, %o7 + + /* Whee, kernel threads! */ + .globl sys_clone, flush_patch_three +sys_clone: + mov %o7, %l5 +flush_patch_three: + FLUSH_ALL_KERNEL_WINDOWS; + ld [%curptr + TI_TASK], %o4 + rd %psr, %g4 + WRITE_PAUSE + rd %wim, %g5 + WRITE_PAUSE + std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] + add %sp, STACKFRAME_SZ, %o0 + call sparc_clone + mov %l5, %o7 + + /* Whee, real vfork! */ + .globl sys_vfork, flush_patch_four +sys_vfork: +flush_patch_four: + FLUSH_ALL_KERNEL_WINDOWS; + ld [%curptr + TI_TASK], %o4 + rd %psr, %g4 + WRITE_PAUSE + rd %wim, %g5 + WRITE_PAUSE + std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] + sethi %hi(sparc_vfork), %l1 + jmpl %l1 + %lo(sparc_vfork), %g0 + add %sp, STACKFRAME_SZ, %o0 + + .align 4 +linux_sparc_ni_syscall: + sethi %hi(sys_ni_syscall), %l7 + b do_syscall + or %l7, %lo(sys_ni_syscall), %l7 + +linux_syscall_trace: + add %sp, STACKFRAME_SZ, %o0 + call syscall_trace + mov 0, %o1 + cmp %o0, 0 + bne 3f + mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. 
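+ * Reload the syscall number and the six argument registers from
+ * the saved pt_regs, since the tracer may have changed them.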
*/
+ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
+ sethi %hi(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
+ or %l7, %lo(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
+ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
+ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
+ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
+ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
+ cmp %g1, NR_syscalls
+ bgeu 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
+ mov %i0, %o0
+ ld [%l7 + %l4], %l7
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+ b 2f
+ mov %i4, %o4
+
+ .globl ret_from_fork
+ret_from_fork:
+ call schedule_tail
+ ld [%g3 + TI_TASK], %o0
+ b ret_sys_call
+ ld [%sp + STACKFRAME_SZ + PT_I0], %o0
+
+ .globl ret_from_kernel_thread
+ret_from_kernel_thread:
+ call schedule_tail
+ ld [%g3 + TI_TASK], %o0
+ ld [%sp + STACKFRAME_SZ + PT_G1], %l0
+ call %l0
+ ld [%sp + STACKFRAME_SZ + PT_G2], %o0
+ rd %psr, %l1
+ ld [%sp + STACKFRAME_SZ + PT_PSR], %l0
+ andn %l0, PSR_CWP, %l0
+ nop
+ and %l1, PSR_CWP, %l1
+ or %l0, %l1, %l0
+ st %l0, [%sp + STACKFRAME_SZ + PT_PSR]
+ b ret_sys_call
+ mov 0, %o0
+
+ /* Linux native system calls enter here... */
+ .align 4
+ .globl linux_sparc_syscall
+linux_sparc_syscall:
+ sethi %hi(PSR_SYSCALL), %l4
+ or %l0, %l4, %l0
+ /* Direct access to user regs, much faster. */
+ cmp %g1, NR_syscalls
+ bgeu linux_sparc_ni_syscall
+ sll %g1, 2, %l4
+ ld [%l7 + %l4], %l7
+
+do_syscall:
+ SAVE_ALL_HEAD
+ rd %wim, %l3
+
+ wr %l0, PSR_ET, %psr
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+
+ ld [%curptr + TI_FLAGS], %l5
+ mov %i3, %o3
+ andcc %l5, _TIF_SYSCALL_TRACE, %g0
+ mov %i4, %o4
+ bne linux_syscall_trace
+ mov %i0, %l6
+2:
+ call %l7
+ mov %i5, %o5
+
+3:
+ st %o0, [%sp + STACKFRAME_SZ + PT_I0]
+
+ret_sys_call:
+ ld [%curptr + TI_FLAGS], %l5
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
+ set PSR_C, %g2
+ bgeu 1f
+ andcc %l5, _TIF_SYSCALL_TRACE, %g0
+
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
+ bne linux_syscall_trace2
+ ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + STACKFRAME_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+ or %g3, %g2, %g3
+ st %o0, [%sp + STACKFRAME_SZ + PT_I0]
+ st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
+ bne linux_syscall_trace2
+ ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + STACKFRAME_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
+
+linux_syscall_trace2:
+ add %sp, STACKFRAME_SZ, %o0
+ mov 1, %o1
+ call syscall_trace
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + STACKFRAME_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
+
+
+/* Saving and restoring the FPU state is best done from low-level code.
+ *
+ * void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ * void *fpqueue, unsigned long *fpqdepth)
+ */
+
+ .globl fpsave
+fpsave:
+ st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
+ ld [%o1], %g1
+ set 0x2000, %g4
+ andcc %g1, %g4, %g0
+ be 2f
+ mov 0, %g2
+
+ /* We have an fpqueue to save.
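+ * Drain it one entry at a time until the FSR qne bit (0x2000,
+ * kept in %g4) clears, counting entries in %g2 so the queue
+ * depth can be handed back through *fpqdepth.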
*/
+1:
+ std %fq, [%o2]
+fpsave_magic:
+ st %fsr, [%o1]
+ ld [%o1], %g3
+ andcc %g3, %g4, %g0
+ add %g2, 1, %g2
+ bne 1b
+ add %o2, 8, %o2
+
+2:
+ st %g2, [%o3]
+
+ std %f0, [%o0 + 0x00]
+ std %f2, [%o0 + 0x08]
+ std %f4, [%o0 + 0x10]
+ std %f6, [%o0 + 0x18]
+ std %f8, [%o0 + 0x20]
+ std %f10, [%o0 + 0x28]
+ std %f12, [%o0 + 0x30]
+ std %f14, [%o0 + 0x38]
+ std %f16, [%o0 + 0x40]
+ std %f18, [%o0 + 0x48]
+ std %f20, [%o0 + 0x50]
+ std %f22, [%o0 + 0x58]
+ std %f24, [%o0 + 0x60]
+ std %f26, [%o0 + 0x68]
+ std %f28, [%o0 + 0x70]
+ retl
+ std %f30, [%o0 + 0x78]
+
+ /* Thanks to Theo Deraadt and the authors of the Sprite/netbsd/openbsd
+ * code for pointing out this possible deadlock: while we save state
+ * above we could trap on the fsr store, so our low-level fpu trap
+ * code has to know how to deal with this.
+ */
+fpsave_catch:
+ b fpsave_magic + 4
+ st %fsr, [%o1]
+
+fpsave_catch2:
+ b fpsave + 4
+ st %fsr, [%o1]
+
+ /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
+
+ .globl fpload
+fpload:
+ ldd [%o0 + 0x00], %f0
+ ldd [%o0 + 0x08], %f2
+ ldd [%o0 + 0x10], %f4
+ ldd [%o0 + 0x18], %f6
+ ldd [%o0 + 0x20], %f8
+ ldd [%o0 + 0x28], %f10
+ ldd [%o0 + 0x30], %f12
+ ldd [%o0 + 0x38], %f14
+ ldd [%o0 + 0x40], %f16
+ ldd [%o0 + 0x48], %f18
+ ldd [%o0 + 0x50], %f20
+ ldd [%o0 + 0x58], %f22
+ ldd [%o0 + 0x60], %f24
+ ldd [%o0 + 0x68], %f26
+ ldd [%o0 + 0x70], %f28
+ ldd [%o0 + 0x78], %f30
+ ld [%o1], %fsr
+ retl
+ nop
+
+ /* __ndelay and __udelay take two arguments:
+ * 0 - nsecs or usecs to delay
+ * 1 - per_cpu udelay_val (loops per jiffy)
+ *
+ * Note that ndelay gives HZ times higher resolution but has a 10ms
+ * limit. udelay can handle up to 1s.
+ */
+ .globl __ndelay
+__ndelay:
+ save %sp, -STACKFRAME_SZ, %sp
+ mov %i0, %o0 ! round multiplier up so large ns ok
+ mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
+ umul %o0, %o1, %o0
+ rd %y, %o1
+ mov %i1, %o1 ! udelay_val
+ umul %o0, %o1, %o0
+ rd %y, %o1
+ ba delay_continue
+ mov %o1, %o0 ! >>32 later for better resolution
+
+ .globl __udelay
+__udelay:
+ save %sp, -STACKFRAME_SZ, %sp
+ mov %i0, %o0
+ sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
+ or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
+ umul %o0, %o1, %o0
+ rd %y, %o1
+ mov %i1, %o1 ! udelay_val
+ umul %o0, %o1, %o0
+ rd %y, %o1
+ sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
+ or %g0, %lo(0x028f4b62), %l0
+ addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
+ bcs,a 3f
+ add %o1, 0x01, %o1
+3:
+ mov HZ, %o0 ! >>32 earlier for wider range
+ umul %o0, %o1, %o0
+ rd %y, %o1
+
+delay_continue:
+ cmp %o0, 0x0
+1:
+ bne 1b
+ subcc %o0, 1, %o0
+
+ ret
+ restore
+EXPORT_SYMBOL(__udelay)
+EXPORT_SYMBOL(__ndelay)
+
+ /* Handle a software breakpoint */
+ /* We have to inform parent that child has stopped */
+ .align 4
+ .globl breakpoint_trap
+breakpoint_trap:
+ rd %wim,%l3
+ SAVE_ALL
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
+ call sparc_breakpoint
+ add %sp, STACKFRAME_SZ, %o0
+
+ RESTORE_ALL
+
+#ifdef CONFIG_KGDB
+ ENTRY(kgdb_trap_low)
+ rd %wim,%l3
+ SAVE_ALL
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov %l7, %o0 ! trap_level
+ call kgdb_trap
+ add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
+
+ RESTORE_ALL
+ ENDPROC(kgdb_trap_low)
+#endif
+
+ .align 4
+ .globl flush_patch_exception
+flush_patch_exception:
+ FLUSH_ALL_KERNEL_WINDOWS;
+ ldd [%o0], %o6
+ jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
+ mov 1, %g1 !
signal EFAULT condition + + .align 4 + .globl kill_user_windows, kuw_patch1_7win + .globl kuw_patch1 +kuw_patch1_7win: sll %o3, 6, %o3 + + /* No matter how much overhead this routine has in the worst + * case scenario, it is several times better than taking the + * traps with the old method of just doing flush_user_windows(). + */ +kill_user_windows: + ld [%g6 + TI_UWINMASK], %o0 ! get current umask + orcc %g0, %o0, %g0 ! if no bits set, we are done + be 3f ! nothing to do + rd %psr, %o5 ! must clear interrupts + or %o5, PSR_PIL, %o4 ! or else that could change + wr %o4, 0x0, %psr ! the uwinmask state + WRITE_PAUSE ! burn them cycles +1: + ld [%g6 + TI_UWINMASK], %o0 ! get consistent state + orcc %g0, %o0, %g0 ! did an interrupt come in? + be 4f ! yep, we are done + rd %wim, %o3 ! get current wim + srl %o3, 1, %o4 ! simulate a save +kuw_patch1: + sll %o3, 7, %o3 ! compute next wim + or %o4, %o3, %o3 ! result + andncc %o0, %o3, %o0 ! clean this bit in umask + bne kuw_patch1 ! not done yet + srl %o3, 1, %o4 ! begin another save simulation + wr %o3, 0x0, %wim ! set the new wim + st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask +4: + wr %o5, 0x0, %psr ! re-enable interrupts + WRITE_PAUSE ! burn baby burn +3: + retl ! return + st %g0, [%g6 + TI_W_SAVED] ! no windows saved + + .align 4 + .globl restore_current +restore_current: + LOAD_CURRENT(g6, o0) + retl + nop + +#ifdef CONFIG_PCIC_PCI +#include + + .align 4 + .globl linux_trap_ipi15_pcic +linux_trap_ipi15_pcic: + rd %wim, %l3 + SAVE_ALL + + /* + * First deactivate NMI + * or we cannot drop ET, cannot get window spill traps. + * The busy loop is necessary because the PIO error + * sometimes does not go away quickly and we trap again. + */ + sethi %hi(pcic_regs), %o1 + ld [%o1 + %lo(pcic_regs)], %o2 + + ! Get pending status for printouts later. + ld [%o2 + PCI_SYS_INT_PENDING], %o0 + + mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1 + stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR] +1: + ld [%o2 + PCI_SYS_INT_PENDING], %o1 + andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0 + bne 1b + nop + + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + + call pcic_nmi + add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs + RESTORE_ALL + + .globl pcic_nmi_trap_patch +pcic_nmi_trap_patch: + sethi %hi(linux_trap_ipi15_pcic), %l3 + jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0 + rd %psr, %l0 + .word 0 + +#endif /* CONFIG_PCIC_PCI */ + + .globl flushw_all +flushw_all: + save %sp, -0x40, %sp + save %sp, -0x40, %sp + save %sp, -0x40, %sp + save %sp, -0x40, %sp + save %sp, -0x40, %sp + save %sp, -0x40, %sp + save %sp, -0x40, %sp + restore + restore + restore + restore + restore + restore + ret + restore + +#ifdef CONFIG_SMP +ENTRY(hard_smp_processor_id) +661: rd %tbr, %g1 + srl %g1, 12, %o0 + and %o0, 3, %o0 + .section .cpuid_patch, "ax" + /* Instruction location. */ + .word 661b + /* SUN4D implementation. */ + lda [%g0] ASI_M_VIKING_TMP1, %o0 + nop + nop + /* LEON implementation. 
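+ * The CPU index lives in the top four bits of %asr17, hence
+ * the shift right by 0x1c below.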
*/ + rd %asr17, %o0 + srl %o0, 0x1c, %o0 + nop + .previous + retl + nop +ENDPROC(hard_smp_processor_id) +#endif + +/* End of entry.S */ diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h new file mode 100644 index 000000000..c746c0fd5 --- /dev/null +++ b/arch/sparc/kernel/entry.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ENTRY_H +#define _ENTRY_H + +#include +#include +#include + +/* irq */ +void handler_irq(int irq, struct pt_regs *regs); + +#ifdef CONFIG_SPARC32 +/* traps */ +void do_hw_interrupt(struct pt_regs *regs, unsigned long type); +void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); + +void do_priv_instruction(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_fpd_trap(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_fpe_trap(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_watchpoint(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_reg_access(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_cp_exception(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); + + + +/* entry.S */ +void fpsave(unsigned long *fpregs, unsigned long *fsr, + void *fpqueue, unsigned long *fpqdepth); +void fpload(unsigned long *fpregs, unsigned long *fsr); + +#else /* CONFIG_SPARC32 */ + +#include + +struct popc_3insn_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct popc_3insn_patch_entry __popc_3insn_patch, + __popc_3insn_patch_end; + +struct popc_6insn_patch_entry { + unsigned int addr; + unsigned int insns[6]; +}; +extern struct popc_6insn_patch_entry __popc_6insn_patch, + __popc_6insn_patch_end; + +struct pause_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct pause_patch_entry __pause_3insn_patch, + __pause_3insn_patch_end; + +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, + struct sun4v_1insn_patch_entry *); +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); +extern unsigned int dcache_parity_tl1_occurred; +extern unsigned int icache_parity_tl1_occurred; + +asmlinkage void sparc_breakpoint(struct pt_regs *regs); +void timer_interrupt(int irq, struct pt_regs *regs); + +void do_notify_resume(struct pt_regs *regs, + unsigned long orig_i0, + unsigned long thread_info_flags); + +asmlinkage int syscall_trace_enter(struct pt_regs *regs); +asmlinkage void syscall_trace_leave(struct pt_regs *regs); + +void bad_trap_tl1(struct pt_regs *regs, long lvl); + +void do_fpieee(struct pt_regs *regs); +void do_fpother(struct pt_regs *regs); +void do_tof(struct pt_regs *regs); +void do_div0(struct pt_regs *regs); +void do_illegal_instruction(struct pt_regs *regs); +void mem_address_unaligned(struct pt_regs *regs, + unsigned long sfar, + unsigned long sfsr); +void sun4v_do_mna(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); 
+void do_privop(struct pt_regs *regs); +void do_privact(struct pt_regs *regs); +void do_cee(struct pt_regs *regs); +void do_div0_tl1(struct pt_regs *regs); +void do_fpieee_tl1(struct pt_regs *regs); +void do_fpother_tl1(struct pt_regs *regs); +void do_ill_tl1(struct pt_regs *regs); +void do_irq_tl1(struct pt_regs *regs); +void do_lddfmna_tl1(struct pt_regs *regs); +void do_stdfmna_tl1(struct pt_regs *regs); +void do_paw(struct pt_regs *regs); +void do_paw_tl1(struct pt_regs *regs); +void do_vaw(struct pt_regs *regs); +void do_vaw_tl1(struct pt_regs *regs); +void do_tof_tl1(struct pt_regs *regs); +void do_getpsr(struct pt_regs *regs); + +void spitfire_insn_access_exception(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_insn_access_exception_tl1(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_data_access_exception(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_data_access_exception_tl1(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_access_error(struct pt_regs *regs, + unsigned long status_encoded, + unsigned long afar); + +void cheetah_fecc_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_cee_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_deferred_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_plus_parity_error(int type, struct pt_regs *regs); + +void sun4v_insn_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_insn_access_exception_tl1(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_data_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_data_access_exception_tl1(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_resum_error(struct pt_regs *regs, + unsigned long offset); +void sun4v_resum_overflow(struct pt_regs *regs); +void sun4v_nonresum_error(struct pt_regs *regs, + unsigned long offset); +void sun4v_nonresum_overflow(struct pt_regs *regs); +void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, + unsigned long addr, + unsigned long context); + +extern unsigned long sun4v_err_itlb_vaddr; +extern unsigned long sun4v_err_itlb_ctx; +extern unsigned long sun4v_err_itlb_pte; +extern unsigned long sun4v_err_itlb_error; + +void sun4v_itlb_error_report(struct pt_regs *regs, int tl); + +extern unsigned long sun4v_err_dtlb_vaddr; +extern unsigned long sun4v_err_dtlb_ctx; +extern unsigned long sun4v_err_dtlb_pte; +extern unsigned long sun4v_err_dtlb_error; + +void sun4v_dtlb_error_report(struct pt_regs *regs, int tl); +void hypervisor_tlbop_error(unsigned long err, + unsigned long op); +void hypervisor_tlbop_error_xcall(unsigned long err, + unsigned long op); + +/* WARNING: The error trap handlers in assembly know the precise + * layout of the following structure. + * + * C-level handlers in traps.c use this information to log the + * error and then determine how to recover (if possible). 
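+ *
+ * The hex annotations on the struct fields below are the byte
+ * offsets the assembler code depends on; keep them in sync with
+ * any layout change.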
+ */ +struct cheetah_err_info { +/*0x00*/u64 afsr; +/*0x08*/u64 afar; + + /* D-cache state */ +/*0x10*/u64 dcache_data[4]; /* The actual data */ +/*0x30*/u64 dcache_index; /* D-cache index */ +/*0x38*/u64 dcache_tag; /* D-cache tag/valid */ +/*0x40*/u64 dcache_utag; /* D-cache microtag */ +/*0x48*/u64 dcache_stag; /* D-cache snooptag */ + + /* I-cache state */ +/*0x50*/u64 icache_data[8]; /* The actual insns + predecode */ +/*0x90*/u64 icache_index; /* I-cache index */ +/*0x98*/u64 icache_tag; /* I-cache phys tag */ +/*0xa0*/u64 icache_utag; /* I-cache microtag */ +/*0xa8*/u64 icache_stag; /* I-cache snooptag */ +/*0xb0*/u64 icache_upper; /* I-cache upper-tag */ +/*0xb8*/u64 icache_lower; /* I-cache lower-tag */ + + /* E-cache state */ +/*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */ +/*0xe0*/u64 ecache_index; /* E-cache index */ +/*0xe8*/u64 ecache_tag; /* E-cache tag/state */ + +/*0xf0*/u64 __pad[32 - 30]; +}; +#define CHAFSR_INVALID ((u64)-1L) + +/* This is allocated at boot time based upon the largest hardware + * cpu ID in the system. We allocate two entries per cpu, one for + * TL==0 logging and one for TL >= 1 logging. + */ +extern struct cheetah_err_info *cheetah_error_log; + +/* UPA nodes send interrupt packet to UltraSparc with first data reg + * value low 5 (7 on Starfire) bits holding the IRQ identifier being + * delivered. We must translate this into a non-vector IRQ so we can + * set the softint on this cpu. + * + * To make processing these packets efficient and race free we use + * an array of irq buckets below. The interrupt vector handler in + * entry.S feeds incoming packets into per-cpu pil-indexed lists. + * + * If you make changes to ino_bucket, please update hand coded assembler + * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S + */ +struct ino_bucket { +/*0x00*/unsigned long __irq_chain_pa; + + /* Interrupt number assigned to this INO. */ +/*0x08*/unsigned int __irq; +/*0x0c*/unsigned int __pad; +}; + +extern struct ino_bucket *ivector_table; +extern unsigned long ivector_table_pa; + +void init_irqwork_curcpu(void); +void sun4v_register_mondo_queues(int this_cpu); + +#endif /* CONFIG_SPARC32 */ +#endif /* _ENTRY_H */ diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S new file mode 100644 index 000000000..9f243f918 --- /dev/null +++ b/arch/sparc/kernel/etrap_32.S @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * etrap.S: Sparc trap window preparation for entry into the + * Linux kernel. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers to not touch at all. */ +#define t_psr l0 /* Set by caller */ +#define t_pc l1 /* Set by caller */ +#define t_npc l2 /* Set by caller */ +#define t_wim l3 /* Set by caller */ +#define t_twinmask l4 /* Set at beginning of this entry routine. */ +#define t_kstack l5 /* Set right before pt_regs frame is built */ +#define t_retpc l6 /* If you change this, change winmacro.h header file */ +#define t_systable l7 /* Never touch this, could be the syscall table ptr. 
*/
+#define curptr g6 /* Set after pt_regs frame is built */
+
+ .text
+ .align 4
+
+ /* SEVEN WINDOW PATCH INSTRUCTIONS */
+ .globl tsetup_7win_patch1, tsetup_7win_patch2
+ .globl tsetup_7win_patch3, tsetup_7win_patch4
+ .globl tsetup_7win_patch5, tsetup_7win_patch6
+tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch2: and %g2, 0x7f, %g2
+tsetup_7win_patch3: and %g2, 0x7f, %g2
+tsetup_7win_patch4: and %g1, 0x7f, %g1
+tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch6: and %g2, 0x7f, %g2
+ /* END OF PATCH INSTRUCTIONS */
+
+ /* At trap time, interrupts and all generic traps do the
+ * following:
+ *
+ * rd %psr, %l0
+ * b some_handler
+ * rd %wim, %l3
+ * nop
+ *
+ * Then 'some_handler' if it needs a trap frame (i.e. it has
+ * to call c-code and the trap cannot be handled in-window)
+ * then it does the SAVE_ALL macro in entry.S which does
+ *
+ * sethi %hi(trap_setup), %l4
+ * jmpl %l4 + %lo(trap_setup), %l6
+ * nop
+ */
+
+ /* 2 3 4 window number
+ * -----
+ * O T S mnemonic
+ *
+ * O == Current window before trap
+ * T == Window entered when trap occurred
+ * S == Window we will need to save if (1<<T) is set
+ * in %wim.
+ */
+
+ /* Clear current_thread_info()->w_saved */
+ st %g0, [%curptr + TI_W_SAVED]
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ bne trap_setup_user_spill ! yep we are
+ orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
+
+ /* Trap from user, but not into the invalid window.
+ * Calculate new umask. The way this works is,
+ * any window from the %wim at trap time until
+ * the window right before the one we are in now,
+ * is a user window. A diagram:
+ *
+ * 7 6 5 4 3 2 1 0 window number
+ * ---------------
+ * I L T mnemonic
+ *
+ * Window 'I' is the invalid window in our example,
+ * window 'L' is the window the user was in when
+ * the trap occurred, window T is the trap window
+ * we are in now. So therefore, windows 5, 4 and
+ * 3 are user windows. The following sequence
+ * computes the user winmask to represent this.
+ */
+ subcc %t_wim, %t_twinmask, %g2
+ bneg,a 1f
+ sub %g2, 0x1, %g2
+1:
+ andn %g2, %t_twinmask, %g2
+tsetup_patch3:
+ and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ st %g2, [%curptr + TI_UWINMASK] ! store new umask
+
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! and onto kernel stack
+
+trap_setup_user_spill:
+ /* A spill occurred from either kernel or user mode
+ * and there exist some user windows to deal with.
+ * A mask of the currently valid user windows
+ * is in %g1 upon entry to here.
+ */
+
+tsetup_patch4:
+ and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
+ srl %t_wim, 0x1, %g2 ! compute new %wim
+tsetup_patch5:
+ sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
+ or %t_wim, %g2, %g2 ! %g2 is new %wim
+tsetup_patch6:
+ and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ andn %g1, %g2, %g1 ! clear this bit in %g1
+ st %g1, [%curptr + TI_UWINMASK]
+
+ save %g0, %g0, %g0
+
+ wr %g2, 0x0, %wim
+
+ /* Call MMU-architecture dependent stack checking
+ * routine.
+ */
+ b tsetup_srmmu_stackchk
+ andcc %sp, 0x7, %g0
+
+ /* Architecture specific stack checking routines. When either
+ * of these routines is called, the globals are free to use
+ * as they have been safely stashed on the new kernel stack
+ * pointer. Thus the definition below for simplicity.
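+ * (The #define below names %g1 as that scratch register.)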
+ */
+#define glob_tmp g1
+
+ .globl tsetup_srmmu_stackchk
+tsetup_srmmu_stackchk:
+ /* Check results of caller's andcc %sp, 0x7, %g0 */
+ bne trap_setup_user_stack_is_bolixed
+ sethi %hi(PAGE_OFFSET), %glob_tmp
+
+ cmp %glob_tmp, %sp
+ bleu,a 1f
+LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
+
+trap_setup_user_stack_is_bolixed:
+ /* From user/kernel into invalid window w/bad user
+ * stack. Save bad user stack, and return to caller.
+ */
+ SAVE_BOLIXED_USER_STACK(curptr, g3)
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+
+1:
+ /* Clear the fault status and turn on the no_fault bit. */
+ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
+
+ /* Dump the registers and cross fingers. */
+ STORE_WINDOW(sp)
+
+ /* Clear the no_fault bit and check the status. */
+ andn %glob_tmp, 0x2, %glob_tmp
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
+
+ mov AC_M_SFAR, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
+
+ mov AC_M_SFSR, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
+
+ andcc %glob_tmp, 0x2, %g0 ! did we fault?
+ bne trap_setup_user_stack_is_bolixed ! failure
+ nop
+
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+
diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S
new file mode 100644
index 000000000..08cc41f64
--- /dev/null
+++ b/arch/sparc/kernel/etrap_64.S
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * etrap.S: Preparing for entry into the kernel on Sparc V9.
+ *
+ * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
+#define ETRAP_PSTATE1 (PSTATE_TSO | PSTATE_PRIV)
+#define ETRAP_PSTATE2 \
+ (PSTATE_TSO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
+
+/*
+ * On entry, %g7 is return address - 0x4.
+ * %g4 and %g5 will be preserved in %l4 and %l5 respectively.
+ */
+
+ .text
+ .align 64
+ .globl etrap_syscall, etrap, etrap_irq, etraptl1
+etrap: rdpr %pil, %g2
+etrap_irq: clr %g3
+etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1)
+ rdpr %tstate, %g1
+ or %g1, %g3, %g1
+ sllx %g2, 20, %g3
+ andcc %g1, TSTATE_PRIV, %g0
+ or %g1, %g3, %g1
+ bne,pn %xcc, 1f
+ sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
+661: wrpr %g0, 7, %cleanwin
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ .word 0x85880000 !
allclean + .previous + + sethi %hi(TASK_REGOFF), %g2 + sethi %hi(TSTATE_PEF), %g3 + or %g2, %lo(TASK_REGOFF), %g2 + and %g1, %g3, %g3 + brnz,pn %g3, 1f + add %g6, %g2, %g2 + wr %g0, 0, %fprs +1: rdpr %tpc, %g3 + + stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] + rdpr %tnpc, %g1 + stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] + rd %y, %g3 + stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] + rdpr %tt, %g1 + st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] + sethi %hi(PT_REGS_MAGIC), %g3 + or %g3, %g1, %g1 + st %g1, [%g2 + STACKFRAME_SZ + PT_V9_MAGIC] + + rdpr %cansave, %g1 + brnz,pt %g1, etrap_save + nop + + rdpr %cwp, %g1 + add %g1, 2, %g1 + wrpr %g1, %cwp + be,pt %xcc, etrap_user_spill + mov ASI_AIUP, %g3 + + rdpr %otherwin, %g3 + brz %g3, etrap_kernel_spill + mov ASI_AIUS, %g3 + +etrap_user_spill: + + wr %g3, 0x0, %asi + ldx [%g6 + TI_FLAGS], %g3 + and %g3, _TIF_32BIT, %g3 + brnz,pt %g3, etrap_user_spill_32bit + nop + ba,a,pt %xcc, etrap_user_spill_64bit + +etrap_save: save %g2, -STACK_BIAS, %sp + mov %g6, %l6 + + bne,pn %xcc, 3f + mov PRIMARY_CONTEXT, %l4 +661: rdpr %canrestore, %g3 + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + + rdpr %wstate, %g2 +661: wrpr %g0, 0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + sll %g2, 3, %g2 + + /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ + mov 1, %l5 + sth %l5, [%l6 + TI_SYS_NOERROR] + +661: wrpr %g3, 0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x87880000 ! otherw + .previous + + wrpr %g2, 0, %wstate + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 + +661: stxa %g3, [%l4] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g3, [%l4] ASI_MMU + .previous + + sethi %hi(KERNBASE), %l4 + flush %l4 + mov ASI_AIUS, %l7 +2: mov %g4, %l4 + mov %g5, %l5 + add %g7, 4, %l2 + + /* Go to trap time globals so we can save them. */ +661: wrpr %g0, ETRAP_PSTATE1, %pstate + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + + stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] + stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] + sllx %l7, 24, %l7 + stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] + rdpr %cwp, %l0 + stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] + stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] + stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] + stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] + or %l7, %l0, %l7 +661: sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0 + /* If userspace is using ADI, it could potentially pass + * a pointer with version tag embedded in it. To maintain + * the ADI security, we must enable PSTATE.mcde. Userspace + * would have already set TTE.mcd in an earlier call to + * kernel and set the version tag for the address being + * dereferenced. Setting PSTATE.mcde would ensure any + * access to userspace data through a system call honors + * ADI and does not allow a rogue app to bypass ADI by + * using system calls. Setting PSTATE.mcde only affects + * accesses to virtual addresses that have TTE.mcd set. + * Set PMCDPER to ensure any exceptions caused by ADI + * version tag mismatch are exposed before system call + * returns to userspace. Setting PMCDPER affects only + * writes to virtual addresses that have TTE.mcd set and + * have a version tag set as well. 
+ */ + .section .sun_m7_1insn_patch, "ax" + .word 661b + sethi %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0 + .previous +661: nop + .section .sun_m7_1insn_patch, "ax" + .word 661b + .word 0xaf902001 /* wrpr %g0, 1, %pmcdper */ + .previous + or %l7, %l0, %l7 + wrpr %l2, %tnpc + wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate + stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] + stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] + stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] + stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] + stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] + stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] + stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] + mov %l6, %g6 + stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] + LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) + ldx [%g6 + TI_TASK], %g4 + done + +3: mov ASI_P, %l7 + ldub [%l6 + TI_FPDEPTH], %l5 + add %l6, TI_FPSAVED + 1, %l4 + srl %l5, 1, %l3 + add %l5, 2, %l5 + + /* Set TI_SYS_FPDEPTH to %l5 and clear TI_SYS_NOERROR. */ + sth %l5, [%l6 + TI_SYS_NOERROR] + ba,pt %xcc, 2b + stb %g0, [%l4 + %l3] + nop + +etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. + * We place this right after pt_regs on the trap stack. + * The layout is: + * 0x00 TL1's TSTATE + * 0x08 TL1's TPC + * 0x10 TL1's TNPC + * 0x18 TL1's TT + * ... + * 0x58 TL4's TT + * 0x60 TL + */ + TRAP_LOAD_THREAD_REG(%g6, %g1) + sub %sp, ((4 * 8) * 4) + 8, %g2 + rdpr %tl, %g1 + + wrpr %g0, 1, %tl + rdpr %tstate, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x00] + rdpr %tpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x08] + rdpr %tnpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x10] + rdpr %tt, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x18] + + wrpr %g0, 2, %tl + rdpr %tstate, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x20] + rdpr %tpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x28] + rdpr %tnpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x30] + rdpr %tt, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x38] + + sethi %hi(is_sun4v), %g3 + lduw [%g3 + %lo(is_sun4v)], %g3 + brnz,pn %g3, finish_tl1_capture + nop + + wrpr %g0, 3, %tl + rdpr %tstate, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x40] + rdpr %tpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x48] + rdpr %tnpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x50] + rdpr %tt, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x58] + + wrpr %g0, 4, %tl + rdpr %tstate, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x60] + rdpr %tpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x68] + rdpr %tnpc, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x70] + rdpr %tt, %g3 + stx %g3, [%g2 + STACK_BIAS + 0x78] + + stx %g1, [%g2 + STACK_BIAS + 0x80] + +finish_tl1_capture: + wrpr %g0, 1, %tl +661: nop + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(1) + .previous + + rdpr %tstate, %g1 + sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 + ba,pt %xcc, 1b + andcc %g1, TSTATE_PRIV, %g0 + +#undef TASK_REGOFF +#undef ETRAP_PSTATE1 diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S new file mode 100644 index 000000000..051659e29 --- /dev/null +++ b/arch/sparc/kernel/fpu_traps.S @@ -0,0 +1,384 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* This is trivial with the new code... */ + .globl do_fpdis + .type do_fpdis,#function +do_fpdis: + sethi %hi(TSTATE_PEF), %g4 + rdpr %tstate, %g5 + andcc %g5, %g4, %g0 + be,pt %xcc, 1f + nop + rd %fprs, %g5 + andcc %g5, FPRS_FEF, %g0 + be,pt %xcc, 1f + nop + + /* Legal state when DCR_IFPOE is set in Cheetah %dcr. 
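+ * Nothing needs fixing up in that case: just build a trap frame
+ * via etrap and return straight through rtrap.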
*/ + sethi %hi(109f), %g7 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + add %g0, %g0, %g0 + ba,a,pt %xcc, rtrap + +1: TRAP_LOAD_THREAD_REG(%g6, %g1) + ldub [%g6 + TI_FPSAVED], %g5 + wr %g0, FPRS_FEF, %fprs + andcc %g5, FPRS_FEF, %g0 + be,a,pt %icc, 1f + clr %g7 + ldx [%g6 + TI_GSR], %g7 +1: andcc %g5, FPRS_DL, %g0 + bne,pn %icc, 2f + fzero %f0 + andcc %g5, FPRS_DU, %g0 + bne,pn %icc, 1f + fzero %f2 + faddd %f0, %f2, %f4 + fmuld %f0, %f2, %f6 + faddd %f0, %f2, %f8 + fmuld %f0, %f2, %f10 + faddd %f0, %f2, %f12 + fmuld %f0, %f2, %f14 + faddd %f0, %f2, %f16 + fmuld %f0, %f2, %f18 + faddd %f0, %f2, %f20 + fmuld %f0, %f2, %f22 + faddd %f0, %f2, %f24 + fmuld %f0, %f2, %f26 + faddd %f0, %f2, %f28 + fmuld %f0, %f2, %f30 + faddd %f0, %f2, %f32 + fmuld %f0, %f2, %f34 + faddd %f0, %f2, %f36 + fmuld %f0, %f2, %f38 + faddd %f0, %f2, %f40 + fmuld %f0, %f2, %f42 + faddd %f0, %f2, %f44 + fmuld %f0, %f2, %f46 + faddd %f0, %f2, %f48 + fmuld %f0, %f2, %f50 + faddd %f0, %f2, %f52 + fmuld %f0, %f2, %f54 + faddd %f0, %f2, %f56 + fmuld %f0, %f2, %f58 + b,pt %xcc, fpdis_exit2 + faddd %f0, %f2, %f60 +1: mov SECONDARY_CONTEXT, %g3 + add %g6, TI_FPREGS + 0x80, %g1 + faddd %f0, %f2, %f4 + fmuld %f0, %f2, %f6 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + + membar #Sync + add %g6, TI_FPREGS + 0xc0, %g2 + faddd %f0, %f2, %f8 + fmuld %f0, %f2, %f10 + membar #Sync + ldda [%g1] ASI_BLK_S, %f32 + ldda [%g2] ASI_BLK_S, %f48 + membar #Sync + faddd %f0, %f2, %f12 + fmuld %f0, %f2, %f14 + faddd %f0, %f2, %f16 + fmuld %f0, %f2, %f18 + faddd %f0, %f2, %f20 + fmuld %f0, %f2, %f22 + faddd %f0, %f2, %f24 + fmuld %f0, %f2, %f26 + faddd %f0, %f2, %f28 + fmuld %f0, %f2, %f30 + ba,a,pt %xcc, fpdis_exit + +2: andcc %g5, FPRS_DU, %g0 + bne,pt %icc, 3f + fzero %f32 + mov SECONDARY_CONTEXT, %g3 + fzero %f34 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + + add %g6, TI_FPREGS, %g1 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + + membar #Sync + add %g6, TI_FPREGS + 0x40, %g2 + faddd %f32, %f34, %f36 + fmuld %f32, %f34, %f38 + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 + ldda [%g2] ASI_BLK_S, %f16 + membar #Sync + faddd %f32, %f34, %f40 + fmuld %f32, %f34, %f42 + faddd %f32, %f34, %f44 + fmuld %f32, %f34, %f46 + faddd %f32, %f34, %f48 + fmuld %f32, %f34, %f50 + faddd %f32, %f34, %f52 + fmuld %f32, %f34, %f54 + faddd %f32, %f34, %f56 + fmuld %f32, %f34, %f58 + faddd %f32, %f34, %f60 + fmuld %f32, %f34, %f62 + ba,a,pt %xcc, fpdis_exit + +3: mov SECONDARY_CONTEXT, %g3 + add %g6, TI_FPREGS, %g1 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + + membar #Sync + mov 0x40, %g2 + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 + ldda [%g1 + %g2] ASI_BLK_S, %f16 + add %g1, 0x80, %g1 + ldda [%g1] ASI_BLK_S, %f32 + ldda [%g1 + %g2] ASI_BLK_S, %f48 + membar 
#Sync +fpdis_exit: + +661: stxa %g5, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g5, [%g3] ASI_MMU + .previous + + membar #Sync +fpdis_exit2: + wr %g7, 0, %gsr + ldx [%g6 + TI_XFSR], %fsr + rdpr %tstate, %g3 + or %g3, %g4, %g3 ! anal... + wrpr %g3, %tstate + wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits + retry + .size do_fpdis,.-do_fpdis + + .align 32 + .type fp_other_bounce,#function +fp_other_bounce: + call do_fpother + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size fp_other_bounce,.-fp_other_bounce + + .align 32 + .globl do_fpother_check_fitos + .type do_fpother_check_fitos,#function +do_fpother_check_fitos: + TRAP_LOAD_THREAD_REG(%g6, %g1) + sethi %hi(fp_other_bounce - 4), %g7 + or %g7, %lo(fp_other_bounce - 4), %g7 + + /* NOTE: Need to preserve %g7 until we fully commit + * to the fitos fixup. + */ + stx %fsr, [%g6 + TI_XFSR] + rdpr %tstate, %g3 + andcc %g3, TSTATE_PRIV, %g0 + bne,pn %xcc, do_fptrap_after_fsr + nop + ldx [%g6 + TI_XFSR], %g3 + srlx %g3, 14, %g1 + and %g1, 7, %g1 + cmp %g1, 2 ! Unfinished FP-OP + bne,pn %xcc, do_fptrap_after_fsr + sethi %hi(1 << 23), %g1 ! Inexact + andcc %g3, %g1, %g0 + bne,pn %xcc, do_fptrap_after_fsr + rdpr %tpc, %g1 + lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail +#define FITOS_MASK 0xc1f83fe0 +#define FITOS_COMPARE 0x81a01880 + sethi %hi(FITOS_MASK), %g1 + or %g1, %lo(FITOS_MASK), %g1 + and %g3, %g1, %g1 + sethi %hi(FITOS_COMPARE), %g2 + or %g2, %lo(FITOS_COMPARE), %g2 + cmp %g1, %g2 + bne,pn %xcc, do_fptrap_after_fsr + nop + std %f62, [%g6 + TI_FPREGS + (62 * 4)] + sethi %hi(fitos_table_1), %g1 + and %g3, 0x1f, %g2 + or %g1, %lo(fitos_table_1), %g1 + sllx %g2, 2, %g2 + jmpl %g1 + %g2, %g0 + ba,pt %xcc, fitos_emul_continue + +fitos_table_1: + fitod %f0, %f62 + fitod %f1, %f62 + fitod %f2, %f62 + fitod %f3, %f62 + fitod %f4, %f62 + fitod %f5, %f62 + fitod %f6, %f62 + fitod %f7, %f62 + fitod %f8, %f62 + fitod %f9, %f62 + fitod %f10, %f62 + fitod %f11, %f62 + fitod %f12, %f62 + fitod %f13, %f62 + fitod %f14, %f62 + fitod %f15, %f62 + fitod %f16, %f62 + fitod %f17, %f62 + fitod %f18, %f62 + fitod %f19, %f62 + fitod %f20, %f62 + fitod %f21, %f62 + fitod %f22, %f62 + fitod %f23, %f62 + fitod %f24, %f62 + fitod %f25, %f62 + fitod %f26, %f62 + fitod %f27, %f62 + fitod %f28, %f62 + fitod %f29, %f62 + fitod %f30, %f62 + fitod %f31, %f62 + +fitos_emul_continue: + sethi %hi(fitos_table_2), %g1 + srl %g3, 25, %g2 + or %g1, %lo(fitos_table_2), %g1 + and %g2, 0x1f, %g2 + sllx %g2, 2, %g2 + jmpl %g1 + %g2, %g0 + ba,pt %xcc, fitos_emul_fini + +fitos_table_2: + fdtos %f62, %f0 + fdtos %f62, %f1 + fdtos %f62, %f2 + fdtos %f62, %f3 + fdtos %f62, %f4 + fdtos %f62, %f5 + fdtos %f62, %f6 + fdtos %f62, %f7 + fdtos %f62, %f8 + fdtos %f62, %f9 + fdtos %f62, %f10 + fdtos %f62, %f11 + fdtos %f62, %f12 + fdtos %f62, %f13 + fdtos %f62, %f14 + fdtos %f62, %f15 + fdtos %f62, %f16 + fdtos %f62, %f17 + fdtos %f62, %f18 + fdtos %f62, %f19 + fdtos %f62, %f20 + fdtos %f62, %f21 + fdtos %f62, %f22 + fdtos %f62, %f23 + fdtos %f62, %f24 + fdtos %f62, %f25 + fdtos %f62, %f26 + fdtos %f62, %f27 + fdtos %f62, %f28 + fdtos %f62, %f29 + fdtos %f62, %f30 + fdtos %f62, %f31 + +fitos_emul_fini: + ldd [%g6 + TI_FPREGS + (62 * 4)], %f62 + done + .size do_fpother_check_fitos,.-do_fpother_check_fitos + + .align 32 + .globl do_fptrap + .type do_fptrap,#function +do_fptrap: + TRAP_LOAD_THREAD_REG(%g6, %g1) + stx %fsr, [%g6 + TI_XFSR] +do_fptrap_after_fsr: + ldub [%g6 + TI_FPSAVED], %g3 + rd %fprs, %g1 + or %g3, %g1, %g3 + stb %g3, [%g6 + TI_FPSAVED] + rd %gsr, 
%g3 + stx %g3, [%g6 + TI_GSR] + mov SECONDARY_CONTEXT, %g3 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + + membar #Sync + add %g6, TI_FPREGS, %g2 + andcc %g1, FPRS_DL, %g0 + be,pn %icc, 4f + mov 0x40, %g3 + stda %f0, [%g2] ASI_BLK_S + stda %f16, [%g2 + %g3] ASI_BLK_S + andcc %g1, FPRS_DU, %g0 + be,pn %icc, 5f +4: add %g2, 128, %g2 + stda %f32, [%g2] ASI_BLK_S + stda %f48, [%g2 + %g3] ASI_BLK_S +5: mov SECONDARY_CONTEXT, %g1 + membar #Sync + +661: stxa %g5, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g5, [%g1] ASI_MMU + .previous + + membar #Sync + ba,pt %xcc, etrap + wr %g0, 0, %fprs + .size do_fptrap,.-do_fptrap diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c new file mode 100644 index 000000000..eaead3da8 --- /dev/null +++ b/arch/sparc/kernel/ftrace.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_DYNAMIC_FTRACE +static const u32 ftrace_nop = 0x01000000; + +static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) +{ + u32 call; + s32 off; + + off = ((s32)addr - (s32)ip); + call = 0x40000000 | ((u32)off >> 2); + + return call; +} + +static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) +{ + u32 replaced; + int faulted; + + __asm__ __volatile__( + "1: cas [%[ip]], %[old], %[new]\n" + " flush %[ip]\n" + " mov 0, %[faulted]\n" + "2:\n" + " .section .fixup,#alloc,#execinstr\n" + " .align 4\n" + "3: sethi %%hi(2b), %[faulted]\n" + " jmpl %[faulted] + %%lo(2b), %%g0\n" + " mov 1, %[faulted]\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .align 4\n" + " .word 1b, 3b\n" + " .previous\n" + : "=r" (replaced), [faulted] "=r" (faulted) + : [new] "0" (new), [old] "r" (old), [ip] "r" (ip) + : "memory"); + + if (replaced != old && replaced != new) + faulted = 2; + + return faulted; +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + u32 old, new; + + old = ftrace_call_replace(ip, addr); + new = ftrace_nop; + return ftrace_modify_code(ip, old, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + u32 old, new; + + old = ftrace_nop; + new = ftrace_call_replace(ip, addr); + return ftrace_modify_code(ip, old, new); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + u32 old, new; + + old = *(u32 *) &ftrace_call; + new = ftrace_call_replace(ip, (unsigned long)func); + return ftrace_modify_code(ip, old, new); +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + u32 old, new; + + old = *(u32 *) &ftrace_graph_call; + new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); + return ftrace_modify_code(ip, old, new); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + u32 old, new; + + old = *(u32 *) &ftrace_graph_call; + new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); + + return 
ftrace_modify_code(ip, old, new); +} + +#endif /* !CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +unsigned long prepare_ftrace_return(unsigned long parent, + unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long) &return_to_handler; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return parent + 8UL; + + if (function_graph_enter(parent, self_addr, frame_pointer, NULL)) + return parent + 8UL; + + return return_hooker; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sparc/kernel/getsetcc.S b/arch/sparc/kernel/getsetcc.S new file mode 100644 index 000000000..181e09fd1 --- /dev/null +++ b/arch/sparc/kernel/getsetcc.S @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + .globl getcc + .type getcc,#function +getcc: + ldx [%o0 + PT_V9_TSTATE], %o1 + srlx %o1, 32, %o1 + and %o1, 0xf, %o1 + retl + stx %o1, [%o0 + PT_V9_G1] + .size getcc,.-getcc + + .globl setcc + .type setcc,#function +setcc: + ldx [%o0 + PT_V9_TSTATE], %o1 + ldx [%o0 + PT_V9_G1], %o2 + or %g0, %ulo(TSTATE_ICC), %o3 + sllx %o3, 32, %o3 + andn %o1, %o3, %o1 + sllx %o2, 32, %o2 + and %o2, %o3, %o2 + or %o1, %o2, %o1 + retl + stx %o1, [%o0 + PT_V9_TSTATE] + .size setcc,.-setcc diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S new file mode 100644 index 000000000..6044b82b9 --- /dev/null +++ b/arch/sparc/kernel/head_32.S @@ -0,0 +1,812 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * head.S: The initial boot code for the Sparc port of Linux. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1995,1999 Pete Zaitcev (zaitcev@yahoo.com) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997 Michael A. Griffith (grif@acm.org) + * + * CompactPCI platform by Eric Brower, 1999. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* TI_UWINMASK */ +#include +#include /* PGDIR_SHIFT */ +#include + + .data +/* The following are used with the prom_vector node-ops to figure out + * the cpu-type + */ + .align 4 + .globl cputypval +cputypval: + .asciz "sun4m" + .ascii " " + +/* Tested on SS-5, SS-10 */ + .align 4 +cputypvar: + .asciz "compatible" + + .align 4 + +notsup: + .asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n" + .align 4 + +sun4e_notsup: + .asciz "Sparc-Linux sun4e support does not exist\n\n" + .align 4 + +/* The trap-table - located in the __HEAD section */ +#include "ttable_32.S" + + .align PAGE_SIZE + +/* This was the only reasonable way I could think of to properly align + * these page-table data structures. + */ + .globl empty_zero_page +empty_zero_page: .skip PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) + + .global root_flags + .global ram_flags + .global root_dev + .global sparc_ramdisk_image + .global sparc_ramdisk_size + +/* This stuff has to be in sync with SILO and other potential boot loaders + * Fields should be kept upward compatible and whenever any change is made, + * HdrS version should be incremented. + */ + .ascii "HdrS" + .word LINUX_VERSION_CODE + .half 0x0203 /* HdrS version */ +root_flags: + .half 1 +root_dev: + .half 0 +ram_flags: + .half 0 +sparc_ramdisk_image: + .word 0 +sparc_ramdisk_size: + .word 0 + .word reboot_command + .word 0, 0, 0 + .word _end + +/* Cool, here we go. 
Pick up the romvec pointer in %o0 and stash it in + * %g7 and at prom_vector_p. And also quickly check whether we are on + * a v0, v2, or v3 prom. + */ +gokernel: + /* Ok, it's nice to know, as early as possible, if we + * are already mapped where we expect to be in virtual + * memory. The Solaris /boot elf format bootloader + * will peek into our elf header and load us where + * we want to be, otherwise we have to re-map. + * + * Some boot loaders don't place the jmp'rs address + * in %o7, so we do a pc-relative call to a local + * label, then see what %o7 has. + */ + + mov %o7, %g4 ! Save %o7 + + /* Jump to it, and pray... */ +current_pc: + call 1f + nop + +1: + mov %o7, %g3 + + tst %o0 + be no_sun4u_here + mov %g4, %o7 /* Previous %o7. */ + + mov %o0, %l0 ! stash away romvec + mov %o0, %g7 ! put it here too + mov %o1, %l1 ! stash away debug_vec too + + /* Ok, let's check out our run time program counter. */ + set current_pc, %g5 + cmp %g3, %g5 + be already_mapped + nop + + /* %l6 will hold the offset we have to subtract + * from absolute symbols in order to access areas + * in our own image. If already mapped this is + * just plain zero, else it is KERNBASE. + */ + set KERNBASE, %l6 + b copy_prom_lvl14 + nop + +already_mapped: + mov 0, %l6 + + /* Copy over the Prom's level 14 clock handler. */ +copy_prom_lvl14: +#if 1 + /* DJHR + * preserve our linked/calculated instructions + */ + set lvl14_save, %g1 + set t_irq14, %g3 + sub %g1, %l6, %g1 ! translate to physical + sub %g3, %l6, %g3 ! translate to physical + ldd [%g3], %g4 + std %g4, [%g1] + ldd [%g3+8], %g4 + std %g4, [%g1+8] +#endif + rd %tbr, %g1 + andn %g1, 0xfff, %g1 ! proms trap table base + or %g0, (0x1e<<4), %g2 ! offset to lvl14 intr + or %g1, %g2, %g2 + set t_irq14, %g3 + sub %g3, %l6, %g3 + ldd [%g2], %g4 + std %g4, [%g3] + ldd [%g2 + 0x8], %g4 + std %g4, [%g3 + 0x8] ! Copy proms handler + +/* DON'T TOUCH %l0 thru %l5 in these remapping routines, + * we need their values afterwards! + */ + + /* Now check whether we are already mapped, if we + * are we can skip all this garbage coming up. + */ +copy_prom_done: + cmp %l6, 0 + be go_to_highmem ! this will be a nop then + nop + + /* Validate that we are in fact running on an + * SRMMU based cpu. + */ + set 0x4000, %g6 + cmp %g7, %g6 + bne not_a_sun4 + nop + +halt_notsup: + ld [%g7 + 0x68], %o1 + set notsup, %o0 + sub %o0, %l6, %o0 + call %o1 + nop + ba halt_me + nop + +not_a_sun4: + /* It looks like this is a machine we support. + * Now find out what MMU we are dealing with + * LEON - identified by the psr.impl field + * Viking - identified by the psr.impl field + * In all other cases a sun4m srmmu. + * We check that the MMU is enabled in all cases. + */ + + /* Check if this is a LEON CPU */ + rd %psr, %g3 + srl %g3, PSR_IMPL_SHIFT, %g3 + and %g3, PSR_IMPL_SHIFTED_MASK, %g3 + cmp %g3, PSR_IMPL_LEON + be leon_remap /* It is a LEON - jump */ + nop + + /* Sanity-check, is MMU enabled */ + lda [%g0] ASI_M_MMUREGS, %g1 + andcc %g1, 1, %g0 + be halt_notsup + nop + + /* Check for a viking (TI) module. */ + cmp %g3, PSR_IMPL_TI + bne srmmu_not_viking + nop + + /* Figure out what kind of viking we are on. + * We need to know if we have to play with the + * AC bit and disable traps or not. + */ + + /* I've only seen MicroSparc's on SparcClassics with this + * bit set. + */ + set 0x800, %g2 + lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg + and %g2, %g3, %g3 + subcc %g3, 0x0, %g0 + bnz srmmu_not_viking ! is in mbus mode + nop + + rd %psr, %g3 ! 
DO NOT TOUCH %g3 + andn %g3, PSR_ET, %g2 + wr %g2, 0x0, %psr + WRITE_PAUSE + + /* Get context table pointer, then convert to + * a physical address, which is 36 bits. + */ + set AC_M_CTPR, %g4 + lda [%g4] ASI_M_MMUREGS, %g4 + sll %g4, 0x4, %g4 ! We use this below + ! DO NOT TOUCH %g4 + + /* Set the AC bit in the Viking's MMU control reg. */ + lda [%g0] ASI_M_MMUREGS, %g5 ! DO NOT TOUCH %g5 + set 0x8000, %g6 ! AC bit mask + or %g5, %g6, %g6 ! Or it in... + sta %g6, [%g0] ASI_M_MMUREGS ! Close your eyes... + + /* Grrr, why does it seem like every other load/store + * on the sun4m is in some ASI space... + * Fine with me, let's get the pointer to the level 1 + * page table directory and fetch its entry. + */ + lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr + srl %o1, 0x4, %o1 ! Clear low 4 bits + sll %o1, 0x8, %o1 ! Make physical + + /* Ok, pull in the PTD. */ + lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd + + /* Calculate to KERNBASE entry. */ + add %o1, KERNBASE >> (PGDIR_SHIFT - 2), %o3 + + /* Poke the entry into the calculated address. */ + sta %o2, [%o3] ASI_M_BYPASS + + /* I don't get it Sun, if you engineered all these + * boot loaders and the PROM (thank you for the debugging + * features btw) why did you not have them load kernel + * images up in high address space, since this is necessary + * for ABI compliance anyways? Does this low-mapping provide + * enhanced interoperability? + * + * "The PROM is the computer." + */ + + /* Ok, restore the MMU control register we saved in %g5 */ + sta %g5, [%g0] ASI_M_MMUREGS ! POW... ouch + + /* Turn traps back on. We saved it in %g3 earlier. */ + wr %g3, 0x0, %psr ! tick tock, tick tock + + /* Now we burn precious CPU cycles due to bad engineering. */ + WRITE_PAUSE + + /* Wow, all that just to move a 32-bit value from one + * place to another... Jump to high memory. + */ + b go_to_highmem + nop + +srmmu_not_viking: + /* This works on viking's in Mbus mode and all + * other MBUS modules. It is virtually the same as + * the above madness sans turning traps off and flipping + * the AC bit. + */ + set AC_M_CTPR, %g1 + lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr + sll %g1, 0x4, %g1 ! make physical addr + lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table + srl %g1, 0x4, %g1 + sll %g1, 0x8, %g1 ! make phys addr for l1 tbl + + lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0 + add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3 + sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry + b go_to_highmem + nop ! wheee.... + + +leon_remap: + /* Sanity-check, is MMU enabled */ + lda [%g0] ASI_LEON_MMUREGS, %g1 + andcc %g1, 1, %g0 + be halt_notsup + nop + + /* Same code as in the srmmu_not_viking case, + * with the LEON ASI for mmuregs + */ + set AC_M_CTPR, %g1 + lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr + sll %g1, 0x4, %g1 ! make physical addr + lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table + srl %g1, 0x4, %g1 + sll %g1, 0x8, %g1 ! make phys addr for l1 tbl + + lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0 + add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3 + sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry + b go_to_highmem + nop ! wheee.... 
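+
+/* Both the srmmu_not_viking and leon_remap paths above compute the
+ * same thing; in rough C (a sketch only; mmu_read/mmu_write are
+ * hypothetical stand-ins for the lda/sta ASI accesses used above):
+ *
+ *	u32 ctxtbl = mmu_read_reg(AC_M_CTPR) << 4;	// phys ctx table
+ *	u32 l1tbl  = (mmu_read(ctxtbl) >> 4) << 8;	// phys level-1 table
+ *	// copy the entry mapping PA 0x0 into the slot covering KERNBASE
+ *	mmu_write(l1tbl + (KERNBASE >> (PGDIR_SHIFT - 2)), mmu_read(l1tbl));
+ */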
+ +/* Now do a non-relative jump so that PC is in high-memory */ +go_to_highmem: + set execute_in_high_mem, %g1 + jmpl %g1, %g0 + nop + +/* The code above should be at beginning and we have to take care about + * short jumps, as branching to .init.text section from .text is usually + * impossible */ + __INIT +/* Acquire boot time privileged register values, this will help debugging. + * I figure out and store nwindows and nwindowsm1 later on. + */ +execute_in_high_mem: + mov %l0, %o0 ! put back romvec + mov %l1, %o1 ! and debug_vec + + sethi %hi(prom_vector_p), %g1 + st %o0, [%g1 + %lo(prom_vector_p)] + + sethi %hi(linux_dbvec), %g1 + st %o1, [%g1 + %lo(linux_dbvec)] + + /* Get the machine type via the romvec + * getprops node operation + */ + add %g7, 0x1c, %l1 + ld [%l1], %l0 + ld [%l0], %l0 + call %l0 + or %g0, %g0, %o0 ! next_node(0) = first_node + or %o0, %g0, %g6 + + sethi %hi(cputypvar), %o1 ! First node has cpu-arch + or %o1, %lo(cputypvar), %o1 + sethi %hi(cputypval), %o2 ! information, the string + or %o2, %lo(cputypval), %o2 + ld [%l1], %l0 ! 'compatible' tells + ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where + call %l0 ! x is one of 'm', 'd' or 'e'. + nop ! %o2 holds pointer + ! to a buf where above string + ! will get stored by the prom. + + + /* Check value of "compatible" property. + * "value" => "model" + * leon => sparc_leon + * sun4m => sun4m + * sun4s => sun4m + * sun4d => sun4d + * sun4e => "no_sun4e_here" + * '*' => "no_sun4u_here" + * Check single letters only + */ + + set cputypval, %o2 + /* If cputypval[0] == 'l' (lower case letter L) this is leon */ + ldub [%o2], %l1 + cmp %l1, 'l' + be leon_init + nop + + /* Check cputypval[4] to find the sun model */ + ldub [%o2 + 0x4], %l1 + + cmp %l1, 'm' + be sun4m_init + cmp %l1, 's' + be sun4m_init + cmp %l1, 'd' + be sun4d_init + cmp %l1, 'e' + be no_sun4e_here ! Could be a sun4e. + nop + b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :)) + nop + +leon_init: + /* LEON CPU - set boot_cpu_id */ + sethi %hi(boot_cpu_id), %g2 ! boot-cpu index + +#ifdef CONFIG_SMP + ldub [%g2 + %lo(boot_cpu_id)], %g1 + cmp %g1, 0xff ! unset means first CPU + bne leon_smp_cpu_startup ! continue only with master + nop +#endif + /* Get CPU-ID from most significant 4-bit of ASR17 */ + rd %asr17, %g1 + srl %g1, 28, %g1 + + /* Update boot_cpu_id only on boot cpu */ + stub %g1, [%g2 + %lo(boot_cpu_id)] + + ba continue_boot + nop + +/* CPUID in bootbus can be found at PA 0xff0140000 */ +#define SUN4D_BOOTBUS_CPUID 0xf0140000 + +sun4d_init: + /* Need to patch call to handler_irq */ + set patch_handler_irq, %g4 + set sun4d_handler_irq, %g5 + sethi %hi(0x40000000), %g3 ! call + sub %g5, %g4, %g5 + srl %g5, 2, %g5 + or %g5, %g3, %g5 + st %g5, [%g4] + +#ifdef CONFIG_SMP + /* Get our CPU id out of bootbus */ + set SUN4D_BOOTBUS_CPUID, %g3 + lduba [%g3] ASI_M_CTL, %g3 + and %g3, 0xf8, %g3 + srl %g3, 3, %g4 + sta %g4, [%g0] ASI_M_VIKING_TMP1 + sethi %hi(boot_cpu_id), %g5 + stb %g4, [%g5 + %lo(boot_cpu_id)] +#endif + + /* Fall through to sun4m_init */ + +sun4m_init: +/* Ok, the PROM could have done funny things and apple cider could still + * be sitting in the fault status/address registers. Read them all to + * clear them so we don't get magic faults later on. + */ +/* This sucks, apparently this makes Vikings call prom panic, will fix later */ +2: + rd %psr, %o1 + srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU + + subcc %o1, PSR_IMPL_TI, %g0 ! 
TI: Viking or MicroSPARC + be continue_boot + nop + + set AC_M_SFSR, %o0 + lda [%o0] ASI_M_MMUREGS, %g0 + set AC_M_SFAR, %o0 + lda [%o0] ASI_M_MMUREGS, %g0 + + /* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */ + subcc %o1, 0, %g0 + be continue_boot + nop + + set AC_M_AFSR, %o0 + lda [%o0] ASI_M_MMUREGS, %g0 + set AC_M_AFAR, %o0 + lda [%o0] ASI_M_MMUREGS, %g0 + nop + + +continue_boot: + +/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's + * show-time! + */ + /* Turn on Supervisor, EnableFloating, and all the PIL bits. + * Also puts us in register window zero with traps off. + */ + set (PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2 + wr %g2, 0x0, %psr + WRITE_PAUSE + + /* I want a kernel stack NOW! */ + set init_thread_union, %g1 + set (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2 + add %g1, %g2, %sp + mov 0, %fp /* And for good luck */ + + /* Zero out our BSS section. */ + set __bss_start , %o0 ! First address of BSS + set _end , %o1 ! Last address of BSS + add %o0, 0x1, %o0 +1: + stb %g0, [%o0] + subcc %o0, %o1, %g0 + bl 1b + add %o0, 0x1, %o0 + + /* If boot_cpu_id has not been setup by machine specific + * init-code above we default it to zero. + */ + sethi %hi(boot_cpu_id), %g2 + ldub [%g2 + %lo(boot_cpu_id)], %g3 + cmp %g3, 0xff + bne 1f + nop + mov %g0, %g3 + stub %g3, [%g2 + %lo(boot_cpu_id)] + +1: sll %g3, 2, %g3 + + /* Initialize the uwinmask value for init task just in case. + * But first make current_set[boot_cpu_id] point to something useful. + */ + set init_thread_union, %g6 + set current_set, %g2 +#ifdef CONFIG_SMP + st %g6, [%g2] + add %g2, %g3, %g2 +#endif + st %g6, [%g2] + + st %g0, [%g6 + TI_UWINMASK] + +/* Compute NWINDOWS and stash it away. Now uses %wim trick explained + * in the V8 manual. Ok, this method seems to work, Sparc is cool... + * No, it doesn't work, have to play the save/readCWP/restore trick. + */ + + wr %g0, 0x0, %wim ! so we do not get a trap + WRITE_PAUSE + + save + + rd %psr, %g3 + + restore + + and %g3, 0x1f, %g3 + add %g3, 0x1, %g3 + + mov 2, %g1 + wr %g1, 0x0, %wim ! make window 1 invalid + WRITE_PAUSE + + cmp %g3, 0x7 + bne 2f + nop + + /* Adjust our window handling routines to + * do things correctly on 7 window Sparcs. + */ + +#define PATCH_INSN(src, dest) \ + set src, %g5; \ + set dest, %g2; \ + ld [%g5], %g4; \ + st %g4, [%g2]; + + /* Patch for window spills... */ + PATCH_INSN(spnwin_patch1_7win, spnwin_patch1) + PATCH_INSN(spnwin_patch2_7win, spnwin_patch2) + PATCH_INSN(spnwin_patch3_7win, spnwin_patch3) + + /* Patch for window fills... */ + PATCH_INSN(fnwin_patch1_7win, fnwin_patch1) + PATCH_INSN(fnwin_patch2_7win, fnwin_patch2) + + /* Patch for trap entry setup... */ + PATCH_INSN(tsetup_7win_patch1, tsetup_patch1) + PATCH_INSN(tsetup_7win_patch2, tsetup_patch2) + PATCH_INSN(tsetup_7win_patch3, tsetup_patch3) + PATCH_INSN(tsetup_7win_patch4, tsetup_patch4) + PATCH_INSN(tsetup_7win_patch5, tsetup_patch5) + PATCH_INSN(tsetup_7win_patch6, tsetup_patch6) + + /* Patch for returning from traps... */ + PATCH_INSN(rtrap_7win_patch1, rtrap_patch1) + PATCH_INSN(rtrap_7win_patch2, rtrap_patch2) + PATCH_INSN(rtrap_7win_patch3, rtrap_patch3) + PATCH_INSN(rtrap_7win_patch4, rtrap_patch4) + PATCH_INSN(rtrap_7win_patch5, rtrap_patch5) + + /* Patch for killing user windows from the register file. */ + PATCH_INSN(kuw_patch1_7win, kuw_patch1) + + /* Now patch the kernel window flush sequences. + * This saves 2 traps on every switch and fork. 
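+	 * (0x01000000 is the SPARC encoding of 'nop'; the stores below
+	 * overwrite the instructions at offsets 0x18 and 0x1c of each
+	 * flush sequence, which a 7-window machine does not need.)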
+ */ + set 0x01000000, %g4 + set flush_patch_one, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + set flush_patch_two, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + set flush_patch_three, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + set flush_patch_four, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + set flush_patch_exception, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + set flush_patch_switch, %g5 + st %g4, [%g5 + 0x18] + st %g4, [%g5 + 0x1c] + +2: + sethi %hi(nwindows), %g4 + st %g3, [%g4 + %lo(nwindows)] ! store final value + sub %g3, 0x1, %g3 + sethi %hi(nwindowsm1), %g4 + st %g3, [%g4 + %lo(nwindowsm1)] + + /* Here we go, start using Linux's trap table... */ + set trapbase, %g3 + wr %g3, 0x0, %tbr + WRITE_PAUSE + + /* Finally, turn on traps so that we can call c-code. */ + rd %psr, %g3 + wr %g3, 0x0, %psr + WRITE_PAUSE + + wr %g3, PSR_ET, %psr + WRITE_PAUSE + + /* Call sparc32_start_kernel(struct linux_romvec *rp) */ + sethi %hi(prom_vector_p), %g5 + ld [%g5 + %lo(prom_vector_p)], %o0 + call sparc32_start_kernel + nop + + /* We should not get here. */ + call halt_me + nop + +no_sun4e_here: + ld [%g7 + 0x68], %o1 + set sun4e_notsup, %o0 + call %o1 + nop + b halt_me + nop + + __INITDATA + +sun4u_1: + .asciz "finddevice" + .align 4 +sun4u_2: + .asciz "/chosen" + .align 4 +sun4u_3: + .asciz "getprop" + .align 4 +sun4u_4: + .asciz "stdout" + .align 4 +sun4u_5: + .asciz "write" + .align 4 +sun4u_6: + .asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r" +sun4u_6e: + .align 4 +sun4u_7: + .asciz "exit" + .align 8 +sun4u_a1: + .word 0, sun4u_1, 0, 1, 0, 1, 0, sun4u_2, 0 +sun4u_r1: + .word 0 +sun4u_a2: + .word 0, sun4u_3, 0, 4, 0, 1, 0 +sun4u_i2: + .word 0, 0, sun4u_4, 0, sun4u_1, 0, 8, 0 +sun4u_r2: + .word 0 +sun4u_a3: + .word 0, sun4u_5, 0, 3, 0, 1, 0 +sun4u_i3: + .word 0, 0, sun4u_6, 0, sun4u_6e - sun4u_6 - 1, 0 +sun4u_r3: + .word 0 +sun4u_a4: + .word 0, sun4u_7, 0, 0, 0, 0 +sun4u_r4: + + __INIT +no_sun4u_here: + set sun4u_a1, %o0 + set current_pc, %l2 + cmp %l2, %g3 + be 1f + mov %o4, %l0 + sub %g3, %l2, %l6 + add %o0, %l6, %o0 + mov %o0, %l4 + mov sun4u_r4 - sun4u_a1, %l3 + ld [%l4], %l5 +2: + add %l4, 4, %l4 + cmp %l5, %l2 + add %l5, %l6, %l5 + bgeu,a 3f + st %l5, [%l4 - 4] +3: + subcc %l3, 4, %l3 + bne 2b + ld [%l4], %l5 +1: + call %l0 + mov %o0, %l1 + + ld [%l1 + (sun4u_r1 - sun4u_a1)], %o1 + add %l1, (sun4u_a2 - sun4u_a1), %o0 + call %l0 + st %o1, [%o0 + (sun4u_i2 - sun4u_a2)] + + ld [%l1 + (sun4u_1 - sun4u_a1)], %o1 + add %l1, (sun4u_a3 - sun4u_a1), %o0 + call %l0 + st %o1, [%o0 + (sun4u_i3 - sun4u_a3)] + + call %l0 + add %l1, (sun4u_a4 - sun4u_a1), %o0 + + /* Not reached */ +halt_me: + ld [%g7 + 0x74], %o0 + call %o0 ! Get us out of here... + nop ! Apparently Solaris is better. + +/* Ok, now we continue in the .data/.text sections */ + + .data + .align 4 + +/* + * Fill up the prom vector, note in particular the kind first element, + * no joke. I don't need all of them in here as the entire prom vector + * gets initialized in c-code so all routines can use it. + */ + +prom_vector_p: + .word 0 + +/* We calculate the following at boot time, window fills/spills and trap entry + * code uses these to keep track of the register windows. + */ + + .align 4 + .globl nwindows + .globl nwindowsm1 +nwindows: + .word 8 +nwindowsm1: + .word 7 + +/* Boot time debugger vector value. We need this later on. 
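+ * It is filled in with the debug_vec value (%o1) that
+ * execute_in_high_mem stashed earlier during boot.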
*/ + + .align 4 + .globl linux_dbvec +linux_dbvec: + .word 0 + .word 0 + + .align 8 + + .globl lvl14_save +lvl14_save: + .word 0 + .word 0 + .word 0 + .word 0 + .word t_irq14 diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S new file mode 100644 index 000000000..72a5bdc83 --- /dev/null +++ b/arch/sparc/kernel/head_64.S @@ -0,0 +1,969 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* head.S: Initial boot code for the Sparc64 port of Linux. + * + * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au) + * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* This section from from _start to sparc64_boot_end should fit into + * 0x0000000000404000 to 0x0000000000408000. + */ + .text + .globl start, _start, stext, _stext +_start: +start: +_stext: +stext: +! 0x0000000000404000 + b sparc64_boot + flushw /* Flush register file. */ + +/* This stuff has to be in sync with SILO and other potential boot loaders + * Fields should be kept upward compatible and whenever any change is made, + * HdrS version should be incremented. + */ + .global root_flags, ram_flags, root_dev + .global sparc_ramdisk_image, sparc_ramdisk_size + .global sparc_ramdisk_image64 + + .ascii "HdrS" + .word LINUX_VERSION_CODE + + /* History: + * + * 0x0300 : Supports being located at other than 0x4000 + * 0x0202 : Supports kernel params string + * 0x0201 : Supports reboot_command + */ + .half 0x0301 /* HdrS version */ + +root_flags: + .half 1 +root_dev: + .half 0 +ram_flags: + .half 0 +sparc_ramdisk_image: + .word 0 +sparc_ramdisk_size: + .word 0 + .xword reboot_command + .xword bootstr_info +sparc_ramdisk_image64: + .xword 0 + .word _end + + /* PROM cif handler code address is in %o4. */ +sparc64_boot: + mov %o4, %l7 + + /* We need to remap the kernel. Use position independent + * code to remap us to KERNBASE. + * + * SILO can invoke us with 32-bit address masking enabled, + * so make sure that's clear. 
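+	 * (PSTATE.AM truncates every virtual address to 32 bits while
+	 * it is set, which would break the 64-bit PC-relative code
+	 * that follows.)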
+ */ + rdpr %pstate, %g1 + andn %g1, PSTATE_AM, %g1 + wrpr %g1, 0x0, %pstate + ba,a,pt %xcc, 1f + nop + + .globl prom_finddev_name, prom_chosen_path, prom_root_node + .globl prom_getprop_name, prom_mmu_name, prom_peer_name + .globl prom_callmethod_name, prom_translate_name, prom_root_compatible + .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache + .globl prom_boot_mapped_pc, prom_boot_mapping_mode + .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low + .globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible + .globl is_sun4v, sun4v_chip_type, prom_set_trap_table_name +prom_peer_name: + .asciz "peer" +prom_compatible_name: + .asciz "compatible" +prom_finddev_name: + .asciz "finddevice" +prom_chosen_path: + .asciz "/chosen" +prom_cpu_path: + .asciz "/cpu" +prom_getprop_name: + .asciz "getprop" +prom_mmu_name: + .asciz "mmu" +prom_callmethod_name: + .asciz "call-method" +prom_translate_name: + .asciz "translate" +prom_map_name: + .asciz "map" +prom_unmap_name: + .asciz "unmap" +prom_set_trap_table_name: + .asciz "SUNW,set-trap-table" +prom_sun4v_name: + .asciz "sun4v" +prom_niagara_prefix: + .asciz "SUNW,UltraSPARC-T" +prom_sparc_prefix: + .asciz "SPARC-" +prom_sparc64x_prefix: + .asciz "SPARC64-X" + .align 4 +prom_root_compatible: + .skip 64 +prom_cpu_compatible: + .skip 64 +prom_root_node: + .word 0 +EXPORT_SYMBOL(prom_root_node) +prom_mmu_ihandle_cache: + .word 0 +prom_boot_mapped_pc: + .word 0 +prom_boot_mapping_mode: + .word 0 + .align 8 +prom_boot_mapping_phys_high: + .xword 0 +prom_boot_mapping_phys_low: + .xword 0 +is_sun4v: + .word 0 +sun4v_chip_type: + .word SUN4V_CHIP_INVALID +EXPORT_SYMBOL(sun4v_chip_type) +1: + rd %pc, %l0 + + mov (1b - prom_peer_name), %l1 + sub %l0, %l1, %l1 + mov 0, %l2 + + /* prom_root_node = prom_peer(0) */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 + stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node + mov (1b - prom_root_node), %l1 + sub %l0, %l1, %l1 + stw %l4, [%l1] + + mov (1b - prom_getprop_name), %l1 + mov (1b - prom_compatible_name), %l2 + mov (1b - prom_root_compatible), %l5 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l5, %l5 + + /* prom_getproperty(prom_root_node, "compatible", + * &prom_root_compatible, 64) + */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node + stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" + stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible + mov 64, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size + stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + mov (1b - prom_finddev_name), %l1 + mov (1b - prom_chosen_path), %l2 + mov (1b - prom_boot_mapped_pc), %l3 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l3, %l3 + stw %l0, [%l3] + sub %sp, (192 + 128), %sp + + /* chosen_node = prom_finddevice("/chosen") */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l2, [%sp + 2047 + 128 + 0x18] ! 
arg1, "/chosen" + stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node + + mov (1b - prom_getprop_name), %l1 + mov (1b - prom_mmu_name), %l2 + mov (1b - prom_mmu_ihandle_cache), %l5 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l5, %l5 + + /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node + stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu" + stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3) + stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + mov (1b - prom_callmethod_name), %l1 + mov (1b - prom_translate_name), %l2 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + lduw [%l5], %l5 ! prom_mmu_ihandle_cache + + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method" + mov 3, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3 + mov 5, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" + stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache + /* PAGE align */ + srlx %l0, 13, %l3 + sllx %l3, 13, %l3 + stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC + stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 + stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 + stx %g0, [%sp + 2047 + 128 + 0x40] ! res3 + stx %g0, [%sp + 2047 + 128 + 0x48] ! res4 + stx %g0, [%sp + 2047 + 128 + 0x50] ! res5 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode + mov (1b - prom_boot_mapping_mode), %l4 + sub %l0, %l4, %l4 + stw %l1, [%l4] + mov (1b - prom_boot_mapping_phys_high), %l4 + sub %l0, %l4, %l4 + ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high + stx %l2, [%l4 + 0x0] + ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low + /* 4MB align */ + srlx %l3, ILOG2_4MB, %l3 + sllx %l3, ILOG2_4MB, %l3 + stx %l3, [%l4 + 0x8] + + /* Leave service as-is, "call-method" */ + mov 7, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + mov (1b - prom_map_name), %l3 + sub %l0, %l3, %l3 + stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map" + /* Leave arg2 as-is, prom_mmu_ihandle_cache */ + mov -1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) + /* 4MB align the kernel image size. */ + set (_end - KERNBASE), %l3 + set ((4 * 1024 * 1024) - 1), %l4 + add %l3, %l4, %l3 + andn %l3, %l4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB) + sethi %hi(KERNBASE), %l3 + stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) + stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty + mov (1b - prom_boot_mapping_phys_low), %l3 + sub %l0, %l3, %l3 + ldx [%l3], %l3 + stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr + call %l7 + add %sp, (2047 + 128), %o0 ! 
argument array + + add %sp, (192 + 128), %sp + + sethi %hi(prom_root_compatible), %g1 + or %g1, %lo(prom_root_compatible), %g1 + sethi %hi(prom_sun4v_name), %g7 + or %g7, %lo(prom_sun4v_name), %g7 + mov 5, %g3 +90: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 80f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 90b + add %g1, 1, %g1 + + sethi %hi(is_sun4v), %g1 + or %g1, %lo(is_sun4v), %g1 + mov 1, %g7 + stw %g7, [%g1] + + /* cpu_node = prom_finddevice("/cpu") */ + mov (1b - prom_finddev_name), %l1 + mov (1b - prom_cpu_path), %l2 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %sp, (192 + 128), %sp + + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu" + stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node + + mov (1b - prom_getprop_name), %l1 + mov (1b - prom_compatible_name), %l2 + mov (1b - prom_cpu_compatible), %l5 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l5, %l5 + + /* prom_getproperty(cpu_node, "compatible", + * &prom_cpu_compatible, 64) + */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node + stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" + stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible + mov 64, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size + stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! 
argument array + + add %sp, (192 + 128), %sp + + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_niagara_prefix), %g7 + or %g7, %lo(prom_niagara_prefix), %g7 + mov 17, %g3 +90: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 89f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 90b + add %g1, 1, %g1 + ba,pt %xcc, 91f + nop + +89: sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc_prefix), %g7 + or %g7, %lo(prom_sparc_prefix), %g7 + mov 6, %g3 +90: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 4f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 90b + add %g1, 1, %g1 + + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + ldub [%g1 + 6], %g2 + cmp %g2, 'T' + be,pt %xcc, 70f + cmp %g2, 'M' + be,pt %xcc, 70f + cmp %g2, 'S' + bne,pn %xcc, 49f + nop + +70: ldub [%g1 + 7], %g2 + cmp %g2, CPU_ID_NIAGARA3 + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA3, %g4 + cmp %g2, CPU_ID_NIAGARA4 + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA4, %g4 + cmp %g2, CPU_ID_NIAGARA5 + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA5, %g4 + cmp %g2, CPU_ID_M6 + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M6, %g4 + cmp %g2, CPU_ID_M7 + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M7, %g4 + cmp %g2, CPU_ID_M8 + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M8, %g4 + cmp %g2, CPU_ID_SONOMA1 + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_SN, %g4 + ba,pt %xcc, 49f + nop + +91: sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + ldub [%g1 + 17], %g2 + cmp %g2, CPU_ID_NIAGARA1 + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA1, %g4 + cmp %g2, CPU_ID_NIAGARA2 + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA2, %g4 + +4: + /* Athena */ + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc64x_prefix), %g7 + or %g7, %lo(prom_sparc64x_prefix), %g7 + mov 9, %g3 +41: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 49f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 41b + add %g1, 1, %g1 + ba,pt %xcc, 5f + mov SUN4V_CHIP_SPARC64X, %g4 + +49: + mov SUN4V_CHIP_UNKNOWN, %g4 +5: sethi %hi(sun4v_chip_type), %g2 + or %g2, %lo(sun4v_chip_type), %g2 + stw %g4, [%g2] + +80: + BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) + BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) + BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) + ba,pt %xcc, spitfire_boot + nop + +cheetah_plus_boot: + /* Preserve OBP chosen DCU and DCR register settings. */ + ba,pt %xcc, cheetah_generic_boot + nop + +cheetah_boot: + mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1 + wr %g1, %asr18 + + sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7 + or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7 + sllx %g7, 32, %g7 + or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7 + stxa %g7, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + +cheetah_generic_boot: + mov TSB_EXTENSION_P, %g3 + stxa %g0, [%g3] ASI_DMMU + stxa %g0, [%g3] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_S, %g3 + stxa %g0, [%g3] ASI_DMMU + membar #Sync + + mov TSB_EXTENSION_N, %g3 + stxa %g0, [%g3] ASI_DMMU + stxa %g0, [%g3] ASI_IMMU + membar #Sync + + ba,a,pt %xcc, jump_to_sun4u_init + +spitfire_boot: + /* Typically PROM has already enabled both MMU's and both on-chip + * caches, but we do it here anyway just to be paranoid. 
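+	 * (The LSU_CONTROL_IC/DC/IM/DM bits written here enable the
+	 * I-cache, D-cache, I-MMU and D-MMU respectively.)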
+ */ + mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1 + stxa %g1, [%g0] ASI_LSU_CONTROL + membar #Sync + +jump_to_sun4u_init: + /* + * Make sure we are in privileged mode, have address masking, + * using the ordinary globals and have enabled floating + * point. + * + * Again, typically PROM has left %pil at 13 or similar, and + * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate. + */ + wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate + wr %g0, 0, %fprs + + set sun4u_init, %g2 + jmpl %g2 + %g0, %g0 + nop + + __REF +sun4u_init: + BRANCH_IF_SUN4V(g1, sun4v_init) + + /* Set ctx 0 */ + mov PRIMARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_DMMU + membar #Sync + + mov SECONDARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_DMMU + membar #Sync + + ba,a,pt %xcc, sun4u_continue + +sun4v_init: + /* Set ctx 0 */ + mov PRIMARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_MMU + membar #Sync + + mov SECONDARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_MMU + membar #Sync + ba,a,pt %xcc, niagara_tlb_fixup + +sun4u_continue: + BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) + + ba,a,pt %xcc, spitfire_tlb_fixup + +niagara_tlb_fixup: + mov 3, %g2 /* Set TLB type to hypervisor. */ + sethi %hi(tlb_type), %g1 + stw %g2, [%g1 + %lo(tlb_type)] + + /* Patch copy/clear ops. */ + sethi %hi(sun4v_chip_type), %g1 + lduw [%g1 + %lo(sun4v_chip_type)], %g1 + cmp %g1, SUN4V_CHIP_NIAGARA1 + be,pt %xcc, niagara_patch + cmp %g1, SUN4V_CHIP_NIAGARA2 + be,pt %xcc, niagara2_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA3 + be,pt %xcc, niagara2_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA4 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA5 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_M6 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_M7 + be,pt %xcc, sparc_m7_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_M8 + be,pt %xcc, sparc_m7_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_SN + be,pt %xcc, niagara4_patch + nop + + call generic_patch_copyops + nop + call generic_patch_bzero + nop + call generic_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + +sparc_m7_patch: + call m7_patch_copyops + nop + call m7_patch_bzero + nop + call m7_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + +niagara4_patch: + call niagara4_patch_copyops + nop + call niagara4_patch_bzero + nop + call niagara4_patch_pageops + nop + call niagara4_patch_fls + nop + + ba,a,pt %xcc, 80f + nop + +niagara2_patch: + call niagara2_patch_copyops + nop + call niagara_patch_bzero + nop + call niagara_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + +niagara_patch: + call niagara_patch_copyops + nop + call niagara_patch_bzero + nop + call niagara_patch_pageops + nop + +80: + /* Patch TLB/cache ops. */ + call hypervisor_patch_cachetlbops + nop + + ba,a,pt %xcc, tlb_fixup_done + +cheetah_tlb_fixup: + mov 2, %g2 /* Set TLB type to cheetah+. */ + BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) + + mov 1, %g2 /* Set TLB type to cheetah. */ + +1: sethi %hi(tlb_type), %g1 + stw %g2, [%g1 + %lo(tlb_type)] + + /* Patch copy/page operations to cheetah optimized versions. */ + call cheetah_patch_copyops + nop + call cheetah_patch_copy_page + nop + call cheetah_patch_cachetlbops + nop + + ba,a,pt %xcc, tlb_fixup_done + +spitfire_tlb_fixup: + /* Set TLB type to spitfire. 
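+	 * For reference, the tlb_type values assigned across these
+	 * fixup paths are: 0 (spitfire), 1 (cheetah), 2 (cheetah+)
+	 * and 3 (hypervisor).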
*/ + mov 0, %g2 + sethi %hi(tlb_type), %g1 + stw %g2, [%g1 + %lo(tlb_type)] + +tlb_fixup_done: + sethi %hi(init_thread_union), %g6 + or %g6, %lo(init_thread_union), %g6 + ldx [%g6 + TI_TASK], %g4 + + wr %g0, ASI_P, %asi + mov 1, %g1 + sllx %g1, THREAD_SHIFT, %g1 + sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1 + add %g6, %g1, %sp + + /* Set per-cpu pointer initially to zero, this makes + * the boot-cpu use the in-kernel-image per-cpu areas + * before setup_per_cpu_area() is invoked. + */ + clr %g5 + + wrpr %g0, 0, %wstate + wrpr %g0, 0x0, %tl + + /* Clear the bss */ + sethi %hi(__bss_start), %o0 + or %o0, %lo(__bss_start), %o0 + sethi %hi(_end), %o1 + or %o1, %lo(_end), %o1 + call __bzero + sub %o1, %o0, %o1 + + call prom_init + mov %l7, %o0 ! OpenPROM cif handler + + /* To create a one-register-window buffer between the kernel's + * initial stack and the last stack frame we use from the firmware, + * do the rest of the boot from a C helper function. + */ + call start_early_boot + nop + /* Not reached... */ + + .previous + + /* This is meant to allow the sharing of this code between + * boot processor invocation (via setup_tba() below) and + * secondary processor startup (via trampoline.S). The + * former does use this code, the latter does not yet due + * to some complexities. That should be fixed up at some + * point. + * + * There used to be enormous complexity wrt. transferring + * over from the firmware's trap table to the Linux kernel's. + * For example, there was a chicken & egg problem wrt. building + * the OBP page tables, yet needing to be on the Linux kernel + * trap table (to translate PAGE_OFFSET addresses) in order to + * do that. + * + * We now handle OBP tlb misses differently, via linear lookups + * into the prom_trans[] array. So that specific problem no + * longer exists. Yet, unfortunately there are still some issues + * preventing trampoline.S from using this code... ho hum. + */ + .globl setup_trap_table +setup_trap_table: + save %sp, -192, %sp + + /* Force interrupts to be disabled. */ + rdpr %pstate, %l0 + andn %l0, PSTATE_IE, %o1 + wrpr %o1, 0x0, %pstate + rdpr %pil, %l1 + wrpr %g0, PIL_NORMAL_MAX, %pil + + /* Make the firmware call to jump over to the Linux trap table. */ + sethi %hi(is_sun4v), %o0 + lduw [%o0 + %lo(is_sun4v)], %o0 + brz,pt %o0, 1f + nop + + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + stxa %g2, [%g0] ASI_SCRATCHPAD + + /* Compute physical address: + * + * paddr = kern_base + (mmfsa_vaddr - KERNBASE) + */ + sethi %hi(KERNBASE), %g3 + sub %g2, %g3, %g2 + sethi %hi(kern_base), %g3 + ldx [%g3 + %lo(kern_base)], %g3 + add %g2, %g3, %o1 + sethi %hi(sparc64_ttable_tl0), %o0 + + set prom_set_trap_table_name, %g2 + stx %g2, [%sp + 2047 + 128 + 0x00] + mov 2, %g2 + stx %g2, [%sp + 2047 + 128 + 0x08] + mov 0, %g2 + stx %g2, [%sp + 2047 + 128 + 0x10] + stx %o0, [%sp + 2047 + 128 + 0x18] + stx %o1, [%sp + 2047 + 128 + 0x20] + sethi %hi(p1275buf), %g2 + or %g2, %lo(p1275buf), %g2 + ldx [%g2 + 0x08], %o1 + call %o1 + add %sp, (2047 + 128), %o0 + + ba,a,pt %xcc, 2f + +1: sethi %hi(sparc64_ttable_tl0), %o0 + set prom_set_trap_table_name, %g2 + stx %g2, [%sp + 2047 + 128 + 0x00] + mov 1, %g2 + stx %g2, [%sp + 2047 + 128 + 0x08] + mov 0, %g2 + stx %g2, [%sp + 2047 + 128 + 0x10] + stx %o0, [%sp + 2047 + 128 + 0x18] + sethi %hi(p1275buf), %g2 + or %g2, %lo(p1275buf), %g2 + ldx [%g2 + 0x08], %o1 + call %o1 + add %sp, (2047 + 128), %o0 + + /* Start using proper page size encodings in ctx register. 
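+	 * sparc64_kern_pri_context holds the kernel context value,
+	 * including those page size fields; it is written into the
+	 * PRIMARY_CONTEXT register below (ASI_DMMU, patched to
+	 * ASI_MMU on sun4v).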
*/ +2: sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + membar #Sync + + BRANCH_IF_SUN4V(o2, 1f) + + /* Kill PROM timer */ + sethi %hi(0x80000000), %o2 + sllx %o2, 32, %o2 + wr %o2, 0, %tick_cmpr + + BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) + + ba,a,pt %xcc, 2f + + /* Disable STICK_INT interrupts. */ +1: + sethi %hi(0x80000000), %o2 + sllx %o2, 32, %o2 + wr %o2, %asr25 + +2: + wrpr %g0, %g0, %wstate + + call init_irqwork_curcpu + nop + + /* Now we can restore interrupt state. */ + wrpr %l0, 0, %pstate + wrpr %l1, 0x0, %pil + + ret + restore + + .globl setup_tba +setup_tba: + save %sp, -192, %sp + + /* The boot processor is the only cpu which invokes this + * routine, the other cpus set things up via trampoline.S. + * So save the OBP trap table address here. + */ + rdpr %tba, %g7 + sethi %hi(prom_tba), %o1 + or %o1, %lo(prom_tba), %o1 + stx %g7, [%o1] + + call setup_trap_table + nop + + ret + restore +sparc64_boot_end: + +#include "etrap_64.S" +#include "rtrap_64.S" +#include "winfixup.S" +#include "fpu_traps.S" +#include "ivec.S" +#include "getsetcc.S" +#include "utrap.S" +#include "spiterrs.S" +#include "cherrs.S" +#include "misctrap.S" +#include "syscalls.S" +#include "helpers.S" +#include "sun4v_tlb_miss.S" +#include "sun4v_mcd.S" +#include "sun4v_ivec.S" +#include "ktlb.S" +#include "tsb.S" + +/* + * The following skip makes sure the trap table in ttable.S is aligned + * on a 32K boundary as required by the v9 specs for TBA register. + * + * We align to a 32K boundary, then we have the 32K kernel TSB, + * the 64K kernel 4MB TSB, and then the 32K aligned trap table. + */ +1: + .skip 0x4000 + _start - 1b + +! 0x0000000000408000 + + .globl swapper_tsb +swapper_tsb: + .skip (32 * 1024) + + .globl swapper_4m_tsb +swapper_4m_tsb: + .skip (64 * 1024) + +! 0x0000000000420000 + + /* Some care needs to be exercised if you try to move the + * location of the trap table relative to other things. For + * one thing there are br* instructions in some of the + * trap table entires which branch back to code in ktlb.S + * Those instructions can only handle a signed 16-bit + * displacement. + * + * There is a binutils bug (bugzilla #4558) which causes + * the relocation overflow checks for such instructions to + * not be done correctly. So bintuils will not notice the + * error and will instead write junk into the relocation and + * you'll have an unbootable kernel. + */ +#include "ttable_64.S" + +! 
0x0000000000428000 + +#include "hvcalls.S" +#include "systbls_64.S" + + .data + .align 8 + .globl prom_tba, tlb_type +prom_tba: .xword 0 +tlb_type: .word 0 /* Must NOT end up in BSS */ +EXPORT_SYMBOL(tlb_type) + .section ".fixup",#alloc,#execinstr + +ENTRY(__retl_efault) + retl + mov -EFAULT, %o0 +ENDPROC(__retl_efault) + +ENTRY(__retl_o1) + retl + mov %o1, %o0 +ENDPROC(__retl_o1) + +ENTRY(__retl_o1_asi) + wr %o5, 0x0, %asi + retl + mov %o1, %o0 +ENDPROC(__retl_o1_asi) diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S new file mode 100644 index 000000000..9b3f74706 --- /dev/null +++ b/arch/sparc/kernel/helpers.S @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + .align 32 + .globl __flushw_user + .type __flushw_user,#function +__flushw_user: + rdpr %otherwin, %g1 + brz,pn %g1, 2f + clr %g2 +1: save %sp, -128, %sp + rdpr %otherwin, %g1 + brnz,pt %g1, 1b + add %g2, 1, %g2 +1: sub %g2, 1, %g2 + brnz,pt %g2, 1b + restore %g0, %g0, %g0 +2: retl + nop + .size __flushw_user,.-__flushw_user +EXPORT_SYMBOL(__flushw_user) + + /* Flush %fp and %i7 to the stack for all register + * windows active inside of the cpu. This allows + * show_stack_trace() to avoid using an expensive + * 'flushw'. + */ + .globl stack_trace_flush + .type stack_trace_flush,#function +stack_trace_flush: + rdpr %pstate, %o0 + wrpr %o0, PSTATE_IE, %pstate + + rdpr %cwp, %g1 + rdpr %canrestore, %g2 + sub %g1, 1, %g3 + +1: brz,pn %g2, 2f + sub %g2, 1, %g2 + wrpr %g3, %cwp + stx %fp, [%sp + STACK_BIAS + RW_V9_I6] + stx %i7, [%sp + STACK_BIAS + RW_V9_I7] + ba,pt %xcc, 1b + sub %g3, 1, %g3 + +2: wrpr %g1, %cwp + wrpr %o0, %pstate + + retl + nop + .size stack_trace_flush,.-stack_trace_flush + +#ifdef CONFIG_SMP + .globl hard_smp_processor_id + .type hard_smp_processor_id,#function +hard_smp_processor_id: +#endif + .globl real_hard_smp_processor_id + .type real_hard_smp_processor_id,#function +real_hard_smp_processor_id: + __GET_CPUID(%o0) + retl + nop +#ifdef CONFIG_SMP + .size hard_smp_processor_id,.-hard_smp_processor_id +#endif + .size real_hard_smp_processor_id,.-real_hard_smp_processor_id +EXPORT_SYMBOL_GPL(real_hard_smp_processor_id) diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c new file mode 100644 index 000000000..717ec7ef0 --- /dev/null +++ b/arch/sparc/kernel/hvapi.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* hvapi.c: Hypervisor API management. + * + * Copyright (C) 2007 David S. Miller + */ +#include +#include +#include + +#include +#include + +/* If the hypervisor indicates that the API setting + * calls are unsupported, by returning HV_EBADTRAP or + * HV_ENOTSUPPORTED, we assume that API groups with the + * PRE_API flag set are major 1 minor 0. 
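+ * (That fallback lives in the HV_EBADTRAP/HV_ENOTSUPPORTED branch
+ * of sun4v_hvapi_register() below.)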
+ */ +struct api_info { + unsigned long group; + unsigned long major; + unsigned long minor; + unsigned int refcnt; + unsigned int flags; +#define FLAG_PRE_API 0x00000001 +}; + +static struct api_info api_table[] = { + { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API }, + { .group = HV_GRP_CORE, .flags = FLAG_PRE_API }, + { .group = HV_GRP_INTR, }, + { .group = HV_GRP_SOFT_STATE, }, + { .group = HV_GRP_TM, }, + { .group = HV_GRP_PCI, .flags = FLAG_PRE_API }, + { .group = HV_GRP_LDOM, }, + { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API }, + { .group = HV_GRP_NCS, .flags = FLAG_PRE_API }, + { .group = HV_GRP_RNG, }, + { .group = HV_GRP_PBOOT, }, + { .group = HV_GRP_TPM, }, + { .group = HV_GRP_SDIO, }, + { .group = HV_GRP_SDIO_ERR, }, + { .group = HV_GRP_REBOOT_DATA, }, + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, + { .group = HV_GRP_DAX, }, + { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, + { .group = HV_GRP_FIRE_PERF, }, + { .group = HV_GRP_N2_CPU, }, + { .group = HV_GRP_NIU, }, + { .group = HV_GRP_VF_CPU, }, + { .group = HV_GRP_KT_CPU, }, + { .group = HV_GRP_VT_CPU, }, + { .group = HV_GRP_T5_CPU, }, + { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, + { .group = HV_GRP_M7_PERF, }, +}; + +static DEFINE_SPINLOCK(hvapi_lock); + +static struct api_info *__get_info(unsigned long group) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(api_table); i++) { + if (api_table[i].group == group) + return &api_table[i]; + } + return NULL; +} + +static void __get_ref(struct api_info *p) +{ + p->refcnt++; +} + +static void __put_ref(struct api_info *p) +{ + if (--p->refcnt == 0) { + unsigned long ignore; + + sun4v_set_version(p->group, 0, 0, &ignore); + p->major = p->minor = 0; + } +} + +/* Register a hypervisor API specification. It indicates the + * API group and desired major+minor. + * + * If an existing API registration exists '0' (success) will + * be returned if it is compatible with the one being registered. + * Otherwise a negative error code will be returned. + * + * Otherwise an attempt will be made to negotiate the requested + * API group/major/minor with the hypervisor, and errors returned + * if that does not succeed. 
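+ *
+ * Illustrative usage from a hypothetical client (not part of this
+ * file):
+ *
+ *	unsigned long major = 1, minor = 0;
+ *
+ *	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor))
+ *		return -ENODEV;
+ *
+ * followed by sun4v_hvapi_unregister(HV_GRP_LDOM) when done.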
+ */ +int sun4v_hvapi_register(unsigned long group, unsigned long major, + unsigned long *minor) +{ + struct api_info *p; + unsigned long flags; + int ret; + + spin_lock_irqsave(&hvapi_lock, flags); + p = __get_info(group); + ret = -EINVAL; + if (p) { + if (p->refcnt) { + ret = -EINVAL; + if (p->major == major) { + *minor = p->minor; + ret = 0; + } + } else { + unsigned long actual_minor; + unsigned long hv_ret; + + hv_ret = sun4v_set_version(group, major, *minor, + &actual_minor); + ret = -EINVAL; + if (hv_ret == HV_EOK) { + *minor = actual_minor; + p->major = major; + p->minor = actual_minor; + ret = 0; + } else if (hv_ret == HV_EBADTRAP || + hv_ret == HV_ENOTSUPPORTED) { + if (p->flags & FLAG_PRE_API) { + if (major == 1) { + p->major = 1; + p->minor = 0; + *minor = 0; + ret = 0; + } + } + } + } + + if (ret == 0) + __get_ref(p); + } + spin_unlock_irqrestore(&hvapi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(sun4v_hvapi_register); + +void sun4v_hvapi_unregister(unsigned long group) +{ + struct api_info *p; + unsigned long flags; + + spin_lock_irqsave(&hvapi_lock, flags); + p = __get_info(group); + if (p) + __put_ref(p); + spin_unlock_irqrestore(&hvapi_lock, flags); +} +EXPORT_SYMBOL(sun4v_hvapi_unregister); + +int sun4v_hvapi_get(unsigned long group, + unsigned long *major, + unsigned long *minor) +{ + struct api_info *p; + unsigned long flags; + int ret; + + spin_lock_irqsave(&hvapi_lock, flags); + ret = -EINVAL; + p = __get_info(group); + if (p && p->refcnt) { + *major = p->major; + *minor = p->minor; + ret = 0; + } + spin_unlock_irqrestore(&hvapi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(sun4v_hvapi_get); + +void __init sun4v_hvapi_init(void) +{ + unsigned long group, major, minor; + + group = HV_GRP_SUN4V; + major = 1; + minor = 0; + if (sun4v_hvapi_register(group, major, &minor)) + goto bad; + + group = HV_GRP_CORE; + major = 1; + minor = 6; + if (sun4v_hvapi_register(group, major, &minor)) + goto bad; + + return; + +bad: + prom_printf("HVAPI: Cannot register API group " + "%lx with major(%lu) minor(%lu)\n", + group, major, minor); + prom_halt(); +} diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S new file mode 100644 index 000000000..2f865a464 --- /dev/null +++ b/arch/sparc/kernel/hvcalls.S @@ -0,0 +1,930 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* %o0: devhandle + * %o1: devino + * + * returns %o0: sysino + */ +ENTRY(sun4v_devino_to_sysino) + mov HV_FAST_INTR_DEVINO2SYSINO, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 +ENDPROC(sun4v_devino_to_sysino) + + /* %o0: sysino + * + * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) + */ +ENTRY(sun4v_intr_getenabled) + mov HV_FAST_INTR_GETENABLED, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 +ENDPROC(sun4v_intr_getenabled) + + /* %o0: sysino + * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) + */ +ENTRY(sun4v_intr_setenabled) + mov HV_FAST_INTR_SETENABLED, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_intr_setenabled) + + /* %o0: sysino + * + * returns %o0: intr_state (HV_INTR_STATE_*) + */ +ENTRY(sun4v_intr_getstate) + mov HV_FAST_INTR_GETSTATE, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 +ENDPROC(sun4v_intr_getstate) + + /* %o0: sysino + * %o1: intr_state (HV_INTR_STATE_*) + */ +ENTRY(sun4v_intr_setstate) + mov HV_FAST_INTR_SETSTATE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_intr_setstate) + + /* %o0: sysino + * + * returns %o0: cpuid + */ +ENTRY(sun4v_intr_gettarget) + mov HV_FAST_INTR_GETTARGET, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 +ENDPROC(sun4v_intr_gettarget) + + /* %o0: 
sysino + * %o1: cpuid + */ +ENTRY(sun4v_intr_settarget) + mov HV_FAST_INTR_SETTARGET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_intr_settarget) + + /* %o0: cpuid + * %o1: pc + * %o2: rtba + * %o3: arg0 + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_start) + mov HV_FAST_CPU_START, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_start) + + /* %o0: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_stop) + mov HV_FAST_CPU_STOP, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_stop) + + /* returns %o0: status */ +ENTRY(sun4v_cpu_yield) + mov HV_FAST_CPU_YIELD, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_yield) + + /* %o0: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_poke) + mov HV_FAST_CPU_POKE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_poke) + + /* %o0: type + * %o1: queue paddr + * %o2: num queue entries + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_qconf) + mov HV_FAST_CPU_QCONF, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_qconf) + + /* %o0: num cpus in cpu list + * %o1: cpu list paddr + * %o2: mondo block paddr + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_mondo_send) + mov HV_FAST_CPU_MONDO_SEND, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_mondo_send) + + /* %o0: CPU ID + * + * returns %o0: -status if status non-zero, else + * %o0: cpu state as HV_CPU_STATE_* + */ +ENTRY(sun4v_cpu_state) + mov HV_FAST_CPU_STATE, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, 1f + sub %g0, %o0, %o0 + mov %o1, %o0 +1: retl + nop +ENDPROC(sun4v_cpu_state) + + /* %o0: virtual address + * %o1: must be zero + * %o2: TTE + * %o3: HV_MMU_* flags + * + * returns %o0: status + */ +ENTRY(sun4v_mmu_map_perm_addr) + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_mmu_map_perm_addr) + + /* %o0: number of TSB descriptions + * %o1: TSB descriptions real address + * + * returns %o0: status + */ +ENTRY(sun4v_mmu_tsb_ctx0) + mov HV_FAST_MMU_TSB_CTX0, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_mmu_tsb_ctx0) + + /* %o0: API group number + * %o1: pointer to unsigned long major number storage + * %o2: pointer to unsigned long minor number storage + * + * returns %o0: status + */ +ENTRY(sun4v_get_version) + mov HV_CORE_GET_VER, %o5 + mov %o1, %o3 + mov %o2, %o4 + ta HV_CORE_TRAP + stx %o1, [%o3] + retl + stx %o2, [%o4] +ENDPROC(sun4v_get_version) + + /* %o0: API group number + * %o1: desired major number + * %o2: desired minor number + * %o3: pointer to unsigned long actual minor number storage + * + * returns %o0: status + */ +ENTRY(sun4v_set_version) + mov HV_CORE_SET_VER, %o5 + mov %o3, %o4 + ta HV_CORE_TRAP + retl + stx %o1, [%o4] +ENDPROC(sun4v_set_version) + + /* %o0: pointer to unsigned long time + * + * returns %o0: status + */ +ENTRY(sun4v_tod_get) + mov %o0, %o4 + mov HV_FAST_TOD_GET, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_tod_get) + + /* %o0: time + * + * returns %o0: status + */ +ENTRY(sun4v_tod_set) + mov HV_FAST_TOD_SET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_tod_set) + + /* %o0: pointer to unsigned long status + * + * returns %o0: signed character + */ +ENTRY(sun4v_con_getchar) + mov %o0, %o4 + mov HV_FAST_CONS_GETCHAR, %o5 + clr %o0 + clr %o1 + ta HV_FAST_TRAP + stx %o0, [%o4] + retl + sra %o1, 0, %o0 +ENDPROC(sun4v_con_getchar) + + /* %o0: signed long character + * + * returns %o0: status + */ +ENTRY(sun4v_con_putchar) + mov HV_FAST_CONS_PUTCHAR, %o5 + ta HV_FAST_TRAP + retl + sra %o0, 0, %o0 +ENDPROC(sun4v_con_putchar) + + /* %o0: buffer real address + * 
%o1: buffer size + * %o2: pointer to unsigned long bytes_read + * + * returns %o0: status + */ +ENTRY(sun4v_con_read) + mov %o2, %o4 + mov HV_FAST_CONS_READ, %o5 + ta HV_FAST_TRAP + brnz %o0, 1f + cmp %o1, -1 /* break */ + be,a,pn %icc, 1f + mov %o1, %o0 + cmp %o1, -2 /* hup */ + be,a,pn %icc, 1f + mov %o1, %o0 + stx %o1, [%o4] +1: retl + nop +ENDPROC(sun4v_con_read) + + /* %o0: buffer real address + * %o1: buffer size + * %o2: pointer to unsigned long bytes_written + * + * returns %o0: status + */ +ENTRY(sun4v_con_write) + mov %o2, %o4 + mov HV_FAST_CONS_WRITE, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_con_write) + + /* %o0: soft state + * %o1: address of description string + * + * returns %o0: status + */ +ENTRY(sun4v_mach_set_soft_state) + mov HV_FAST_MACH_SET_SOFT_STATE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_mach_set_soft_state) + + /* %o0: exit code + * + * Does not return. + */ +ENTRY(sun4v_mach_exit) + mov HV_FAST_MACH_EXIT, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_mach_exit) + + /* %o0: buffer real address + * %o1: buffer length + * %o2: pointer to unsigned long real_buf_len + * + * returns %o0: status + */ +ENTRY(sun4v_mach_desc) + mov %o2, %o4 + mov HV_FAST_MACH_DESC, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_mach_desc) + + /* %o0: new timeout in milliseconds + * %o1: pointer to unsigned long orig_timeout + * + * returns %o0: status + */ +ENTRY(sun4v_mach_set_watchdog) + mov %o1, %o4 + mov HV_FAST_MACH_SET_WATCHDOG, %o5 + ta HV_FAST_TRAP + brnz,a,pn %o4, 0f + stx %o1, [%o4] +0: retl + nop +ENDPROC(sun4v_mach_set_watchdog) +EXPORT_SYMBOL(sun4v_mach_set_watchdog) + + /* No inputs and does not return. */ +ENTRY(sun4v_mach_sir) + mov %o1, %o4 + mov HV_FAST_MACH_SIR, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_mach_sir) + + /* %o0: channel + * %o1: ra + * %o2: num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_tx_qconf) + mov HV_FAST_LDC_TX_QCONF, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_tx_qconf) + + /* %o0: channel + * %o1: pointer to unsigned long ra + * %o2: pointer to unsigned long num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_tx_qinfo) + mov %o1, %g1 + mov %o2, %g2 + mov HV_FAST_LDC_TX_QINFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + retl + nop +ENDPROC(sun4v_ldc_tx_qinfo) + + /* %o0: channel + * %o1: pointer to unsigned long head_off + * %o2: pointer to unsigned long tail_off + * %o2: pointer to unsigned long chan_state + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_tx_get_state) + mov %o1, %g1 + mov %o2, %g2 + mov %o3, %g3 + mov HV_FAST_LDC_TX_GET_STATE, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + stx %o3, [%g3] + retl + nop +ENDPROC(sun4v_ldc_tx_get_state) + + /* %o0: channel + * %o1: tail_off + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_tx_set_qtail) + mov HV_FAST_LDC_TX_SET_QTAIL, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_tx_set_qtail) + + /* %o0: channel + * %o1: ra + * %o2: num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_rx_qconf) + mov HV_FAST_LDC_RX_QCONF, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_rx_qconf) + + /* %o0: channel + * %o1: pointer to unsigned long ra + * %o2: pointer to unsigned long num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_rx_qinfo) + mov %o1, %g1 + mov %o2, %g2 + mov HV_FAST_LDC_RX_QINFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + retl + nop +ENDPROC(sun4v_ldc_rx_qinfo) + + /* %o0: channel + * %o1: pointer to 
unsigned long head_off + * %o2: pointer to unsigned long tail_off + * %o2: pointer to unsigned long chan_state + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_rx_get_state) + mov %o1, %g1 + mov %o2, %g2 + mov %o3, %g3 + mov HV_FAST_LDC_RX_GET_STATE, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + stx %o3, [%g3] + retl + nop +ENDPROC(sun4v_ldc_rx_get_state) + + /* %o0: channel + * %o1: head_off + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_rx_set_qhead) + mov HV_FAST_LDC_RX_SET_QHEAD, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_rx_set_qhead) + + /* %o0: channel + * %o1: ra + * %o2: num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_set_map_table) + mov HV_FAST_LDC_SET_MAP_TABLE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_set_map_table) + + /* %o0: channel + * %o1: pointer to unsigned long ra + * %o2: pointer to unsigned long num_entries + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_get_map_table) + mov %o1, %g1 + mov %o2, %g2 + mov HV_FAST_LDC_GET_MAP_TABLE, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + retl + nop +ENDPROC(sun4v_ldc_get_map_table) + + /* %o0: channel + * %o1: dir_code + * %o2: tgt_raddr + * %o3: lcl_raddr + * %o4: len + * %o5: pointer to unsigned long actual_len + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_copy) + mov %o5, %g1 + mov HV_FAST_LDC_COPY, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + retl + nop +ENDPROC(sun4v_ldc_copy) + + /* %o0: channel + * %o1: cookie + * %o2: pointer to unsigned long ra + * %o3: pointer to unsigned long perm + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_mapin) + mov %o2, %g1 + mov %o3, %g2 + mov HV_FAST_LDC_MAPIN, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + retl + nop +ENDPROC(sun4v_ldc_mapin) + + /* %o0: ra + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_unmap) + mov HV_FAST_LDC_UNMAP, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_unmap) + + /* %o0: channel + * %o1: cookie + * %o2: mte_cookie + * + * returns %o0: status + */ +ENTRY(sun4v_ldc_revoke) + mov HV_FAST_LDC_REVOKE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ldc_revoke) + + /* %o0: device handle + * %o1: device INO + * %o2: pointer to unsigned long cookie + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_get_cookie) + mov %o2, %g1 + mov HV_FAST_VINTR_GET_COOKIE, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + retl + nop +ENDPROC(sun4v_vintr_get_cookie) + + /* %o0: device handle + * %o1: device INO + * %o2: cookie + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_set_cookie) + mov HV_FAST_VINTR_SET_COOKIE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vintr_set_cookie) + + /* %o0: device handle + * %o1: device INO + * %o2: pointer to unsigned long valid_state + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_get_valid) + mov %o2, %g1 + mov HV_FAST_VINTR_GET_VALID, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + retl + nop +ENDPROC(sun4v_vintr_get_valid) + + /* %o0: device handle + * %o1: device INO + * %o2: valid_state + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_set_valid) + mov HV_FAST_VINTR_SET_VALID, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vintr_set_valid) + + /* %o0: device handle + * %o1: device INO + * %o2: pointer to unsigned long state + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_get_state) + mov %o2, %g1 + mov HV_FAST_VINTR_GET_STATE, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + retl + nop +ENDPROC(sun4v_vintr_get_state) + + /* %o0: device handle + * %o1: device INO + * %o2: state + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_set_state) + mov 
HV_FAST_VINTR_SET_STATE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vintr_set_state) + + /* %o0: device handle + * %o1: device INO + * %o2: pointer to unsigned long cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_get_target) + mov %o2, %g1 + mov HV_FAST_VINTR_GET_TARGET, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + retl + nop +ENDPROC(sun4v_vintr_get_target) + + /* %o0: device handle + * %o1: device INO + * %o2: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_vintr_set_target) + mov HV_FAST_VINTR_SET_TARGET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vintr_set_target) + + /* %o0: NCS sub-function + * %o1: sub-function arg real-address + * %o2: sub-function arg size + * + * returns %o0: status + */ +ENTRY(sun4v_ncs_request) + mov HV_FAST_NCS_REQUEST, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ncs_request) + +ENTRY(sun4v_svc_send) + save %sp, -192, %sp + mov %i0, %o0 + mov %i1, %o1 + mov %i2, %o2 + mov HV_FAST_SVC_SEND, %o5 + ta HV_FAST_TRAP + stx %o1, [%i3] + ret + restore +ENDPROC(sun4v_svc_send) + +ENTRY(sun4v_svc_recv) + save %sp, -192, %sp + mov %i0, %o0 + mov %i1, %o1 + mov %i2, %o2 + mov HV_FAST_SVC_RECV, %o5 + ta HV_FAST_TRAP + stx %o1, [%i3] + ret + restore +ENDPROC(sun4v_svc_recv) + +ENTRY(sun4v_svc_getstatus) + mov HV_FAST_SVC_GETSTATUS, %o5 + mov %o1, %o4 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_svc_getstatus) + +ENTRY(sun4v_svc_setstatus) + mov HV_FAST_SVC_SETSTATUS, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_svc_setstatus) + +ENTRY(sun4v_svc_clrstatus) + mov HV_FAST_SVC_CLRSTATUS, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_svc_clrstatus) + +ENTRY(sun4v_mmustat_conf) + mov %o1, %o4 + mov HV_FAST_MMUSTAT_CONF, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_mmustat_conf) + +ENTRY(sun4v_mmustat_info) + mov %o0, %o4 + mov HV_FAST_MMUSTAT_INFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_mmustat_info) + +ENTRY(sun4v_mmu_demap_all) + clr %o0 + clr %o1 + mov HV_MMU_ALL, %o2 + mov HV_FAST_MMU_DEMAP_ALL, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_mmu_demap_all) + +ENTRY(sun4v_niagara_getperf) + mov %o0, %o4 + mov HV_FAST_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_niagara_getperf) +EXPORT_SYMBOL(sun4v_niagara_getperf) + +ENTRY(sun4v_niagara_setperf) + mov HV_FAST_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_niagara_setperf) +EXPORT_SYMBOL(sun4v_niagara_setperf) + +ENTRY(sun4v_niagara2_getperf) + mov %o0, %o4 + mov HV_FAST_N2_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_niagara2_getperf) +EXPORT_SYMBOL(sun4v_niagara2_getperf) + +ENTRY(sun4v_niagara2_setperf) + mov HV_FAST_N2_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_niagara2_setperf) +EXPORT_SYMBOL(sun4v_niagara2_setperf) + +ENTRY(sun4v_reboot_data_set) + mov HV_FAST_REBOOT_DATA_SET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_reboot_data_set) + +ENTRY(sun4v_vt_get_perfreg) + mov %o1, %o4 + mov HV_FAST_VT_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_vt_get_perfreg) + +ENTRY(sun4v_vt_set_perfreg) + mov HV_FAST_VT_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vt_set_perfreg) + +ENTRY(sun4v_t5_get_perfreg) + mov %o1, %o4 + mov HV_FAST_T5_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_t5_get_perfreg) + +ENTRY(sun4v_t5_set_perfreg) + mov HV_FAST_T5_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_t5_set_perfreg) + 
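+	/* Every stub in this file encodes the same sun4v fast-trap
+	 * convention: arguments arrive in %o0-%o4 per the C ABI, the
+	 * hypervisor function number is loaded into %o5, and
+	 * "ta HV_FAST_TRAP" enters the hypervisor; the status comes
+	 * back in %o0 and further results in %o1 onward, which the
+	 * stubs store through any pointer arguments.  An illustrative
+	 * C-level caller of the perf-register stubs above
+	 * (hypothetical, not part of this file):
+	 *
+	 *	unsigned long val;
+	 *
+	 *	if (sun4v_t5_get_perfreg(0, &val) == HV_EOK)
+	 *		sun4v_t5_set_perfreg(0, val);
+	 */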
+ENTRY(sun4v_m7_get_perfreg) + mov %o1, %o4 + mov HV_FAST_M7_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_m7_get_perfreg) + +ENTRY(sun4v_m7_set_perfreg) + mov HV_FAST_M7_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_m7_set_perfreg) + + /* %o0: address of CCB array + * %o1: size (in bytes) of CCB array + * %o2: flags + * %o3: reserved + * + * returns: + * %o0: status + * %o1: size (in bytes) of the CCB array that was accepted + * %o2: status data + * %o3: reserved + */ +ENTRY(sun4v_ccb_submit) + mov %o5, %g1 + mov HV_CCB_SUBMIT, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + stx %o2, [%g1] +ENDPROC(sun4v_ccb_submit) +EXPORT_SYMBOL(sun4v_ccb_submit) + + /* %o0: completion area ra for the ccb to get info + * + * returns: + * %o0: status + * %o1: CCB state + * %o2: position + * %o3: dax unit + * %o4: queue + */ +ENTRY(sun4v_ccb_info) + mov %o1, %g1 + mov HV_CCB_INFO, %o5 + ta HV_FAST_TRAP + sth %o1, [%g1 + CCB_INFO_OFFSET_CCB_STATE] + sth %o2, [%g1 + CCB_INFO_OFFSET_QUEUE_POS] + sth %o3, [%g1 + CCB_INFO_OFFSET_DAX_UNIT] + retl + sth %o4, [%g1 + CCB_INFO_OFFSET_QUEUE_NUM] +ENDPROC(sun4v_ccb_info) +EXPORT_SYMBOL(sun4v_ccb_info) + + /* %o0: completion area ra for the ccb to kill + * + * returns: + * %o0: status + * %o1: result of the kill + */ +ENTRY(sun4v_ccb_kill) + mov %o1, %g1 + mov HV_CCB_KILL, %o5 + ta HV_FAST_TRAP + retl + sth %o1, [%g1] +ENDPROC(sun4v_ccb_kill) +EXPORT_SYMBOL(sun4v_ccb_kill) diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S new file mode 100644 index 000000000..f39220471 --- /dev/null +++ b/arch/sparc/kernel/hvtramp.S @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* hvtramp.S: Hypervisor start-cpu trampoline code. + * + * Copyright (C) 2007, 2008 David S. Miller + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + .align 8 + .globl hv_cpu_startup, hv_cpu_startup_end + + /* This code executes directly out of the hypervisor + * with physical addressing (va==pa). %o0 contains + * our client argument which for Linux points to + * a descriptor data structure which defines the + * MMU entries we need to load up. + * + * After we set things up we enable the MMU and call + * into the kernel. + * + * First setup basic privileged cpu state. 
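+	 * ("Basic" here means the register-window bookkeeping, an
+	 * interrupt-blocking PIL and the trap-table base, which are
+	 * exactly the writes that open hv_cpu_startup below.)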
+ */ +hv_cpu_startup: + SET_GL(0) + wrpr %g0, PIL_NORMAL_MAX, %pil + wrpr %g0, 0, %canrestore + wrpr %g0, 0, %otherwin + wrpr %g0, 6, %cansave + wrpr %g0, 6, %cleanwin + wrpr %g0, 0, %cwp + wrpr %g0, 0, %wstate + wrpr %g0, 0, %tl + + sethi %hi(sparc64_ttable_tl0), %g1 + wrpr %g1, %tba + + mov %o0, %l0 + + lduw [%l0 + HVTRAMP_DESCR_CPU], %g1 + mov SCRATCHPAD_CPUID, %g2 + stxa %g1, [%g2] ASI_SCRATCHPAD + + ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2 + stxa %g2, [%g0] ASI_SCRATCHPAD + + mov 0, %l1 + lduw [%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2 + add %l0, HVTRAMP_DESCR_MAPS, %l3 + +1: ldx [%l3 + HVTRAMP_MAPPING_VADDR], %o0 + clr %o1 + ldx [%l3 + HVTRAMP_MAPPING_TTE], %o2 + mov HV_MMU_IMMU | HV_MMU_DMMU, %o3 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + ta HV_FAST_TRAP + + brnz,pn %o0, 80f + nop + + add %l1, 1, %l1 + cmp %l1, %l2 + blt,a,pt %xcc, 1b + add %l3, HVTRAMP_MAPPING_SIZE, %l3 + + ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0 + mov HV_FAST_MMU_FAULT_AREA_CONF, %o5 + ta HV_FAST_TRAP + + brnz,pn %o0, 80f + nop + + wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate + + ldx [%l0 + HVTRAMP_DESCR_THREAD_REG], %l6 + + mov 1, %o0 + set 1f, %o1 + mov HV_FAST_MMU_ENABLE, %o5 + ta HV_FAST_TRAP + + ba,pt %xcc, 80f + nop + +1: + wr %g0, 0, %fprs + wr %g0, ASI_P, %asi + + mov PRIMARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_MMU + membar #Sync + + mov SECONDARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_MMU + membar #Sync + + mov %l6, %g6 + ldx [%g6 + TI_TASK], %g4 + + mov 1, %g5 + sllx %g5, THREAD_SHIFT, %g5 + sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 + add %g6, %g5, %sp + + call init_irqwork_curcpu + nop + call hard_smp_processor_id + nop + + call sun4v_register_mondo_queues + nop + + call init_cur_cpu_trap + mov %g6, %o0 + + wrpr %g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate + + call smp_callin + nop + + call cpu_panic + nop + +80: ba,pt %xcc, 80b + nop + + .align 8 +hv_cpu_startup_end: diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c new file mode 100644 index 000000000..d6c46d512 --- /dev/null +++ b/arch/sparc/kernel/idprom.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * idprom.c: Routines to load the idprom into kernel addresses and + * interpret the data contained within. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include + +#include +#include + +struct idprom *idprom; +EXPORT_SYMBOL(idprom); + +static struct idprom idprom_buffer; + +#ifdef CONFIG_SPARC32 +#include /* Fun with Sun released architectures. */ + +/* Here is the master table of Sun machines which use some implementation + * of the Sparc CPU and have a meaningful IDPROM machtype value that we + * know about. See asm-sparc/machines.h for empirical constants. 
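+ *
+ * (display_system_type() below matches on exact id_machtype equality,
+ * so the ordering of this table is purely cosmetic.)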
+ */ +static struct Sun_Machine_Models Sun_Machines[] = { +/* First, Leon */ +{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, +/* Finally, early Sun4m's */ +{ .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) }, +{ .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) }, +{ .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) }, +/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */ +{ .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) } }; + +static void __init display_system_type(unsigned char machtype) +{ + char sysname[128]; + register int i; + + for (i = 0; i < ARRAY_SIZE(Sun_Machines); i++) { + if (Sun_Machines[i].id_machtype == machtype) { + if (machtype != (SM_SUN4M_OBP | 0x00) || + prom_getproperty(prom_root_node, "banner-name", + sysname, sizeof(sysname)) <= 0) + printk(KERN_WARNING "TYPE: %s\n", + Sun_Machines[i].name); + else + printk(KERN_WARNING "TYPE: %s\n", sysname); + return; + } + } + + prom_printf("IDPROM: Warning, bogus id_machtype value, 0x%x\n", machtype); +} +#else +static void __init display_system_type(unsigned char machtype) +{ +} +#endif + +unsigned char *arch_get_platform_mac_address(void) +{ + return idprom->id_ethaddr; +} + +/* Calculate the IDPROM checksum (xor of the data bytes). */ +static unsigned char __init calc_idprom_cksum(struct idprom *idprom) +{ + unsigned char cksum, i, *ptr = (unsigned char *)idprom; + + for (i = cksum = 0; i <= 0x0E; i++) + cksum ^= *ptr++; + + return cksum; +} + +/* Create a local IDPROM copy, verify integrity, and display information. */ +void __init idprom_init(void) +{ + prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer)); + + idprom = &idprom_buffer; + + if (idprom->id_format != 0x01) + prom_printf("IDPROM: Warning, unknown format type!\n"); + + if (idprom->id_cksum != calc_idprom_cksum(idprom)) + prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n", + idprom->id_cksum, calc_idprom_cksum(idprom)); + + display_system_type(idprom->id_machtype); + + printk(KERN_WARNING "Ethernet address: %pM\n", idprom->id_ethaddr); +} diff --git a/arch/sparc/kernel/iommu-common.c b/arch/sparc/kernel/iommu-common.c new file mode 100644 index 000000000..23ca75f09 --- /dev/null +++ b/arch/sparc/kernel/iommu-common.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IOMMU mmap management and range allocation functions. + * Based almost entirely upon the powerpc iommu allocator. + */ + +#include +#include +#include +#include +#include +#include +#include + +static unsigned long iommu_large_alloc = 15; + +static DEFINE_PER_CPU(unsigned int, iommu_hash_common); + +static inline bool need_flush(struct iommu_map_table *iommu) +{ + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); +} + +static inline void set_flush(struct iommu_map_table *iommu) +{ + iommu->flags |= IOMMU_NEED_FLUSH; +} + +static inline void clear_flush(struct iommu_map_table *iommu) +{ + iommu->flags &= ~IOMMU_NEED_FLUSH; +} + +static void setup_iommu_pool_hash(void) +{ + unsigned int i; + static bool do_once; + + if (do_once) + return; + do_once = true; + for_each_possible_cpu(i) + per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS); +} + +/* + * Initialize iommu_pool entries for the iommu_map_table. `num_entries' + * is the number of table entries. If `large_pool' is set to true, + * the top 1/4 of the table will be set aside for pool allocations + * of more than iommu_large_alloc pages. 
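+ *
+ * Illustrative example: with `num_entries' = 1024, `large_pool' true
+ * and `npools' = 4, each small pool gets (1024 * 3 / 4) / 4 = 192
+ * entries and the large pool spans entries 768..1023.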
+ */ +void iommu_tbl_pool_init(struct iommu_map_table *iommu, + unsigned long num_entries, + u32 table_shift, + void (*lazy_flush)(struct iommu_map_table *), + bool large_pool, u32 npools, + bool skip_span_boundary_check) +{ + unsigned int start, i; + struct iommu_pool *p = &(iommu->large_pool); + + setup_iommu_pool_hash(); + if (npools == 0) + iommu->nr_pools = IOMMU_NR_POOLS; + else + iommu->nr_pools = npools; + BUG_ON(npools > IOMMU_NR_POOLS); + + iommu->table_shift = table_shift; + iommu->lazy_flush = lazy_flush; + start = 0; + if (skip_span_boundary_check) + iommu->flags |= IOMMU_NO_SPAN_BOUND; + if (large_pool) + iommu->flags |= IOMMU_HAS_LARGE_POOL; + + if (!large_pool) + iommu->poolsize = num_entries/iommu->nr_pools; + else + iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools; + for (i = 0; i < iommu->nr_pools; i++) { + spin_lock_init(&(iommu->pools[i].lock)); + iommu->pools[i].start = start; + iommu->pools[i].hint = start; + start += iommu->poolsize; /* start for next pool */ + iommu->pools[i].end = start - 1; + } + if (!large_pool) + return; + /* initialize large_pool */ + spin_lock_init(&(p->lock)); + p->start = start; + p->hint = p->start; + p->end = num_entries; +} + +unsigned long iommu_tbl_range_alloc(struct device *dev, + struct iommu_map_table *iommu, + unsigned long npages, + unsigned long *handle, + unsigned long mask, + unsigned int align_order) +{ + unsigned int pool_hash = __this_cpu_read(iommu_hash_common); + unsigned long n, end, start, limit, boundary_size; + struct iommu_pool *pool; + int pass = 0; + unsigned int pool_nr; + unsigned int npools = iommu->nr_pools; + unsigned long flags; + bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0); + bool largealloc = (large_pool && npages > iommu_large_alloc); + unsigned long shift; + unsigned long align_mask = 0; + + if (align_order > 0) + align_mask = ~0ul >> (BITS_PER_LONG - align_order); + + /* Sanity check */ + if (unlikely(npages == 0)) { + WARN_ON_ONCE(1); + return IOMMU_ERROR_CODE; + } + + if (largealloc) { + pool = &(iommu->large_pool); + pool_nr = 0; /* to keep compiler happy */ + } else { + /* pick out pool_nr */ + pool_nr = pool_hash & (npools - 1); + pool = &(iommu->pools[pool_nr]); + } + spin_lock_irqsave(&pool->lock, flags); + + again: + if (pass == 0 && handle && *handle && + (*handle >= pool->start) && (*handle < pool->end)) + start = *handle; + else + start = pool->hint; + + limit = pool->end; + + /* The case below can happen if we have a small segment appended + * to a large, or when the previous alloc was at the very end of + * the available space. If so, go back to the beginning. If a + * flush is needed, it will get done based on the return value + * from iommu_area_alloc() below. + */ + if (start >= limit) + start = pool->start; + shift = iommu->table_map_base >> iommu->table_shift; + if (limit + shift > mask) { + limit = mask - shift + 1; + /* If we're constrained on address range, first try + * at the masked hint to avoid O(n) search complexity, + * but on second pass, start at 0 in pool 0. 
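+		 * (Pool 0 starts at offset zero, giving the rescan the
+		 * best chance of fitting under the clamped limit.)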
+ */ + if ((start & mask) >= limit || pass > 0) { + spin_unlock(&(pool->lock)); + pool = &(iommu->pools[0]); + spin_lock(&(pool->lock)); + start = pool->start; + } else { + start &= mask; + } + } + + /* + * if the skip_span_boundary_check had been set during init, we set + * things up so that iommu_is_span_boundary() merely checks if the + * (index + npages) < num_tsb_entries + */ + if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) { + shift = 0; + boundary_size = iommu->poolsize * iommu->nr_pools; + } else { + boundary_size = dma_get_seg_boundary_nr_pages(dev, + iommu->table_shift); + } + n = iommu_area_alloc(iommu->map, limit, start, npages, shift, + boundary_size, align_mask); + if (n == -1) { + if (likely(pass == 0)) { + /* First failure, rescan from the beginning. */ + pool->hint = pool->start; + set_flush(iommu); + pass++; + goto again; + } else if (!largealloc && pass <= iommu->nr_pools) { + spin_unlock(&(pool->lock)); + pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1); + pool = &(iommu->pools[pool_nr]); + spin_lock(&(pool->lock)); + pool->hint = pool->start; + set_flush(iommu); + pass++; + goto again; + } else { + /* give up */ + n = IOMMU_ERROR_CODE; + goto bail; + } + } + if (iommu->lazy_flush && + (n < pool->hint || need_flush(iommu))) { + clear_flush(iommu); + iommu->lazy_flush(iommu); + } + + end = n + npages; + pool->hint = end; + + /* Update handle for SG allocations */ + if (handle) + *handle = end; +bail: + spin_unlock_irqrestore(&(pool->lock), flags); + + return n; +} + +static struct iommu_pool *get_pool(struct iommu_map_table *tbl, + unsigned long entry) +{ + struct iommu_pool *p; + unsigned long largepool_start = tbl->large_pool.start; + bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); + + /* The large pool is the last pool at the top of the table */ + if (large_pool && entry >= largepool_start) { + p = &tbl->large_pool; + } else { + unsigned int pool_nr = entry / tbl->poolsize; + + BUG_ON(pool_nr >= tbl->nr_pools); + p = &tbl->pools[pool_nr]; + } + return p; +} + +/* Caller supplies the index of the entry into the iommu map table + * itself when the mapping from dma_addr to the entry is not the + * default addr->entry mapping below. + */ +void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, + unsigned long npages, unsigned long entry) +{ + struct iommu_pool *pool; + unsigned long flags; + unsigned long shift = iommu->table_shift; + + if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */ + entry = (dma_addr - iommu->table_map_base) >> shift; + pool = get_pool(iommu, entry); + + spin_lock_irqsave(&(pool->lock), flags); + bitmap_clear(iommu->map, entry, npages); + spin_unlock_irqrestore(&(pool->lock), flags); +} diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c new file mode 100644 index 000000000..da0363692 --- /dev/null +++ b/arch/sparc/kernel/iommu.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0 +/* iommu.c: Generic sparc64 IOMMU support. + * + * Copyright (C) 1999, 2007, 2008 David S. 
Miller (davem@davemloft.net) + * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PCI +#include +#endif + +#include + +#include "iommu_common.h" +#include "kernel.h" + +#define STC_CTXMATCH_ADDR(STC, CTX) \ + ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) +#define STC_FLUSHFLAG_INIT(STC) \ + (*((STC)->strbuf_flushflag) = 0UL) +#define STC_FLUSHFLAG_SET(STC) \ + (*((STC)->strbuf_flushflag) != 0UL) + +#define iommu_read(__reg) \ +({ u64 __ret; \ + __asm__ __volatile__("ldxa [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ + : "memory"); \ + __ret; \ +}) +#define iommu_write(__reg, __val) \ + __asm__ __volatile__("stxa %0, [%1] %2" \ + : /* no outputs */ \ + : "r" (__val), "r" (__reg), \ + "i" (ASI_PHYS_BYPASS_EC_E)) + +/* Must be invoked under the IOMMU lock. */ +static void iommu_flushall(struct iommu_map_table *iommu_map_table) +{ + struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); + if (iommu->iommu_flushinv) { + iommu_write(iommu->iommu_flushinv, ~(u64)0); + } else { + unsigned long tag; + int entry; + + tag = iommu->iommu_tags; + for (entry = 0; entry < 16; entry++) { + iommu_write(tag, 0); + tag += 8; + } + + /* Ensure completion of previous PIO writes. */ + (void) iommu_read(iommu->write_complete_reg); + } +} + +#define IOPTE_CONSISTENT(CTX) \ + (IOPTE_VALID | IOPTE_CACHE | \ + (((CTX) << 47) & IOPTE_CONTEXT)) + +#define IOPTE_STREAMING(CTX) \ + (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF) + +/* Existing mappings are never marked invalid, instead they + * are pointed to a dummy page. + */ +#define IOPTE_IS_DUMMY(iommu, iopte) \ + ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) + +static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) +{ + unsigned long val = iopte_val(*iopte); + + val &= ~IOPTE_PAGE; + val |= iommu->dummy_page_pa; + + iopte_val(*iopte) = val; +} + +int iommu_table_init(struct iommu *iommu, int tsbsize, + u32 dma_offset, u32 dma_addr_mask, + int numa_node) +{ + unsigned long i, order, sz, num_tsb_entries; + struct page *page; + + num_tsb_entries = tsbsize / sizeof(iopte_t); + + /* Setup initial software IOMMU state. */ + spin_lock_init(&iommu->lock); + iommu->ctx_lowest_free = 1; + iommu->tbl.table_map_base = dma_offset; + iommu->dma_addr_mask = dma_addr_mask; + + /* Allocate and initialize the free area map. */ + sz = num_tsb_entries / 8; + sz = (sz + 7UL) & ~7UL; + iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node); + if (!iommu->tbl.map) + return -ENOMEM; + + iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, + (tlb_type != hypervisor ? iommu_flushall : NULL), + false, 1, false); + + /* Allocate and initialize the dummy page which we + * set inactive IO PTEs to point to. + */ + page = alloc_pages_node(numa_node, GFP_KERNEL, 0); + if (!page) { + printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n"); + goto out_free_map; + } + iommu->dummy_page = (unsigned long) page_address(page); + memset((void *)iommu->dummy_page, 0, PAGE_SIZE); + iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); + + /* Now allocate and setup the IOMMU page table itself. 
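+	 * (alloc_pages_node() returns an order-aligned block, so the
+	 * table comes back naturally aligned to its size.)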
*/ + order = get_order(tsbsize); + page = alloc_pages_node(numa_node, GFP_KERNEL, order); + if (!page) { + printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); + goto out_free_dummy_page; + } + iommu->page_table = (iopte_t *)page_address(page); + + for (i = 0; i < num_tsb_entries; i++) + iopte_make_dummy(iommu, &iommu->page_table[i]); + + return 0; + +out_free_dummy_page: + free_page(iommu->dummy_page); + iommu->dummy_page = 0UL; + +out_free_map: + kfree(iommu->tbl.map); + iommu->tbl.map = NULL; + + return -ENOMEM; +} + +static inline iopte_t *alloc_npages(struct device *dev, + struct iommu *iommu, + unsigned long npages) +{ + unsigned long entry; + + entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + (unsigned long)(-1), 0); + if (unlikely(entry == IOMMU_ERROR_CODE)) + return NULL; + + return iommu->page_table + entry; +} + +static int iommu_alloc_ctx(struct iommu *iommu) +{ + int lowest = iommu->ctx_lowest_free; + int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); + + if (unlikely(n == IOMMU_NUM_CTXS)) { + n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); + if (unlikely(n == lowest)) { + printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); + n = 0; + } + } + if (n) + __set_bit(n, iommu->ctx_bitmap); + + return n; +} + +static inline void iommu_free_ctx(struct iommu *iommu, int ctx) +{ + if (likely(ctx)) { + __clear_bit(ctx, iommu->ctx_bitmap); + if (ctx < iommu->ctx_lowest_free) + iommu->ctx_lowest_free = ctx; + } +} + +static void *dma_4u_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addrp, gfp_t gfp, + unsigned long attrs) +{ + unsigned long order, first_page; + struct iommu *iommu; + struct page *page; + int npages, nid; + iopte_t *iopte; + void *ret; + + size = IO_PAGE_ALIGN(size); + order = get_order(size); + if (order >= 10) + return NULL; + + nid = dev->archdata.numa_node; + page = alloc_pages_node(nid, gfp, order); + if (unlikely(!page)) + return NULL; + + first_page = (unsigned long) page_address(page); + memset((char *)first_page, 0, PAGE_SIZE << order); + + iommu = dev->archdata.iommu; + + iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); + + if (unlikely(iopte == NULL)) { + free_pages(first_page, order); + return NULL; + } + + *dma_addrp = (iommu->tbl.table_map_base + + ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); + ret = (void *) first_page; + npages = size >> IO_PAGE_SHIFT; + first_page = __pa(first_page); + while (npages--) { + iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) | + IOPTE_WRITE | + (first_page & IOPTE_PAGE)); + iopte++; + first_page += IO_PAGE_SIZE; + } + + return ret; +} + +static void dma_4u_free_coherent(struct device *dev, size_t size, + void *cpu, dma_addr_t dvma, + unsigned long attrs) +{ + struct iommu *iommu; + unsigned long order, npages; + + npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; + iommu = dev->archdata.iommu; + + iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); + + order = get_order(size); + if (order < 10) + free_pages((unsigned long)cpu, order); +} + +static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t sz, + enum dma_data_direction direction, + unsigned long attrs) +{ + struct iommu *iommu; + struct strbuf *strbuf; + iopte_t *base; + unsigned long flags, npages, oaddr; + unsigned long i, base_paddr, ctx; + u32 bus_addr, ret; + unsigned long iopte_protection; + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + + if (unlikely(direction == DMA_NONE)) + goto bad_no_ctx; + + oaddr = (unsigned 
long)(page_address(page) + offset); + npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + + base = alloc_npages(dev, iommu, npages); + spin_lock_irqsave(&iommu->lock, flags); + ctx = 0; + if (iommu->iommu_ctxflush) + ctx = iommu_alloc_ctx(iommu); + spin_unlock_irqrestore(&iommu->lock, flags); + + if (unlikely(!base)) + goto bad; + + bus_addr = (iommu->tbl.table_map_base + + ((base - iommu->page_table) << IO_PAGE_SHIFT)); + ret = bus_addr | (oaddr & ~IO_PAGE_MASK); + base_paddr = __pa(oaddr & IO_PAGE_MASK); + if (strbuf->strbuf_enabled) + iopte_protection = IOPTE_STREAMING(ctx); + else + iopte_protection = IOPTE_CONSISTENT(ctx); + if (direction != DMA_TO_DEVICE) + iopte_protection |= IOPTE_WRITE; + + for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) + iopte_val(*base) = iopte_protection | base_paddr; + + return ret; + +bad: + iommu_free_ctx(iommu, ctx); +bad_no_ctx: + if (printk_ratelimit()) + WARN_ON(1); + return DMA_MAPPING_ERROR; +} + +static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, + u32 vaddr, unsigned long ctx, unsigned long npages, + enum dma_data_direction direction) +{ + int limit; + + if (strbuf->strbuf_ctxflush && + iommu->iommu_ctxflush) { + unsigned long matchreg, flushreg; + u64 val; + + flushreg = strbuf->strbuf_ctxflush; + matchreg = STC_CTXMATCH_ADDR(strbuf, ctx); + + iommu_write(flushreg, ctx); + val = iommu_read(matchreg); + val &= 0xffff; + if (!val) + goto do_flush_sync; + + while (val) { + if (val & 0x1) + iommu_write(flushreg, ctx); + val >>= 1; + } + val = iommu_read(matchreg); + if (unlikely(val)) { + printk(KERN_WARNING "strbuf_flush: ctx flush " + "timeout matchreg[%llx] ctx[%lx]\n", + val, ctx); + goto do_page_flush; + } + } else { + unsigned long i; + + do_page_flush: + for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) + iommu_write(strbuf->strbuf_pflush, vaddr); + } + +do_flush_sync: + /* If the device could not have possibly put dirty data into + * the streaming cache, no flush-flag synchronization needs + * to be performed. + */ + if (direction == DMA_TO_DEVICE) + return; + + STC_FLUSHFLAG_INIT(strbuf); + iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa); + (void) iommu_read(iommu->write_complete_reg); + + limit = 100000; + while (!STC_FLUSHFLAG_SET(strbuf)) { + limit--; + if (!limit) + break; + udelay(1); + rmb(); + } + if (!limit) + printk(KERN_WARNING "strbuf_flush: flushflag timeout " + "vaddr[%08x] ctx[%lx] npages[%ld]\n", + vaddr, ctx, npages); +} + +static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, + size_t sz, enum dma_data_direction direction, + unsigned long attrs) +{ + struct iommu *iommu; + struct strbuf *strbuf; + iopte_t *base; + unsigned long flags, npages, ctx, i; + + if (unlikely(direction == DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + return; + } + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + base = iommu->page_table + + ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); + bus_addr &= IO_PAGE_MASK; + + spin_lock_irqsave(&iommu->lock, flags); + + /* Record the context, if any. */ + ctx = 0; + if (iommu->iommu_ctxflush) + ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; + + /* Step 1: Kick data out of streaming buffers if necessary. 
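+	 * If this were skipped, a late write-back from the streaming
+	 * cache could land in memory after the IOPTEs below have been
+	 * repointed at the dummy page.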
*/ + if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + strbuf_flush(strbuf, iommu, bus_addr, ctx, + npages, direction); + + /* Step 2: Clear out TSB entries. */ + for (i = 0; i < npages; i++) + iopte_make_dummy(iommu, base + i); + + iommu_free_ctx(iommu, ctx); + spin_unlock_irqrestore(&iommu->lock, flags); + + iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); +} + +static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction, + unsigned long attrs) +{ + struct scatterlist *s, *outs, *segstart; + unsigned long flags, handle, prot, ctx; + dma_addr_t dma_next = 0, dma_addr; + unsigned int max_seg_size; + unsigned long seg_boundary_size; + int outcount, incount, i; + struct strbuf *strbuf; + struct iommu *iommu; + unsigned long base_shift; + + BUG_ON(direction == DMA_NONE); + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + if (nelems == 0 || !iommu) + return -EINVAL; + + spin_lock_irqsave(&iommu->lock, flags); + + ctx = 0; + if (iommu->iommu_ctxflush) + ctx = iommu_alloc_ctx(iommu); + + if (strbuf->strbuf_enabled) + prot = IOPTE_STREAMING(ctx); + else + prot = IOPTE_CONSISTENT(ctx); + if (direction != DMA_TO_DEVICE) + prot |= IOPTE_WRITE; + + outs = s = segstart = &sglist[0]; + outcount = 1; + incount = nelems; + handle = 0; + + /* Init first segment length for backout at failure */ + outs->dma_length = 0; + + max_seg_size = dma_get_max_seg_size(dev); + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); + base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; + for_each_sg(sglist, s, nelems, i) { + unsigned long paddr, npages, entry, out_entry = 0, slen; + iopte_t *base; + + slen = s->length; + /* Sanity check */ + if (slen == 0) { + dma_next = 0; + continue; + } + /* Allocate iommu entries for that segment */ + paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); + npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); + entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, + &handle, (unsigned long)(-1), 0); + + /* Handle failure */ + if (unlikely(entry == IOMMU_ERROR_CODE)) { + if (printk_ratelimit()) + printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" + " npages %lx\n", iommu, paddr, npages); + goto iommu_map_failed; + } + + base = iommu->page_table + entry; + + /* Convert entry to a dma_addr_t */ + dma_addr = iommu->tbl.table_map_base + + (entry << IO_PAGE_SHIFT); + dma_addr |= (s->offset & ~IO_PAGE_MASK); + + /* Insert into HW table */ + paddr &= IO_PAGE_MASK; + while (npages--) { + iopte_val(*base) = prot | paddr; + base++; + paddr += IO_PAGE_SIZE; + } + + /* If we are in an open segment, try merging */ + if (segstart != s) { + /* We cannot merge if: + * - allocated dma_addr isn't contiguous to previous allocation + */ + if ((dma_addr != dma_next) || + (outs->dma_length + s->length > max_seg_size) || + (is_span_boundary(out_entry, base_shift, + seg_boundary_size, outs, s))) { + /* Can't merge: create a new segment */ + segstart = s; + outcount++; + outs = sg_next(outs); + } else { + outs->dma_length += s->length; + } + } + + if (segstart == s) { + /* This is a new segment, fill entries */ + outs->dma_address = dma_addr; + outs->dma_length = slen; + out_entry = entry; + } + + /* Calculate next page pointer for contiguous check */ + dma_next = dma_addr + slen; + } + + spin_unlock_irqrestore(&iommu->lock, flags); + + if (outcount < incount) { + outs = sg_next(outs); + outs->dma_length = 0; + } + + return outcount; + +iommu_map_failed: + 
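+	/* Unwind: walk the entries mapped so far, repoint their IOPTEs
+	 * at the dummy page and return the ranges to the allocator.
+	 */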
for_each_sg(sglist, s, nelems, i) { + if (s->dma_length != 0) { + unsigned long vaddr, npages, entry, j; + iopte_t *base; + + vaddr = s->dma_address & IO_PAGE_MASK; + npages = iommu_num_pages(s->dma_address, s->dma_length, + IO_PAGE_SIZE); + + entry = (vaddr - iommu->tbl.table_map_base) + >> IO_PAGE_SHIFT; + base = iommu->page_table + entry; + + for (j = 0; j < npages; j++) + iopte_make_dummy(iommu, base + j); + + iommu_tbl_range_free(&iommu->tbl, vaddr, npages, + IOMMU_ERROR_CODE); + + s->dma_length = 0; + } + if (s == outs) + break; + } + spin_unlock_irqrestore(&iommu->lock, flags); + + return -EINVAL; +} + +/* If contexts are being used, they are the same in all of the mappings + * we make for a particular SG. + */ +static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) +{ + unsigned long ctx = 0; + + if (iommu->iommu_ctxflush) { + iopte_t *base; + u32 bus_addr; + struct iommu_map_table *tbl = &iommu->tbl; + + bus_addr = sg->dma_address & IO_PAGE_MASK; + base = iommu->page_table + + ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT); + + ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; + } + return ctx; +} + +static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction, + unsigned long attrs) +{ + unsigned long flags, ctx; + struct scatterlist *sg; + struct strbuf *strbuf; + struct iommu *iommu; + + BUG_ON(direction == DMA_NONE); + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + + ctx = fetch_sg_ctx(iommu, sglist); + + spin_lock_irqsave(&iommu->lock, flags); + + sg = sglist; + while (nelems--) { + dma_addr_t dma_handle = sg->dma_address; + unsigned int len = sg->dma_length; + unsigned long npages, entry; + iopte_t *base; + int i; + + if (!len) + break; + npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); + + entry = ((dma_handle - iommu->tbl.table_map_base) + >> IO_PAGE_SHIFT); + base = iommu->page_table + entry; + + dma_handle &= IO_PAGE_MASK; + if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + strbuf_flush(strbuf, iommu, dma_handle, ctx, + npages, direction); + + for (i = 0; i < npages; i++) + iopte_make_dummy(iommu, base + i); + + iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, + IOMMU_ERROR_CODE); + sg = sg_next(sg); + } + + iommu_free_ctx(iommu, ctx); + + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static void dma_4u_sync_single_for_cpu(struct device *dev, + dma_addr_t bus_addr, size_t sz, + enum dma_data_direction direction) +{ + struct iommu *iommu; + struct strbuf *strbuf; + unsigned long flags, ctx, npages; + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + + if (!strbuf->strbuf_enabled) + return; + + spin_lock_irqsave(&iommu->lock, flags); + + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + bus_addr &= IO_PAGE_MASK; + + /* Step 1: Record the context, if any. */ + ctx = 0; + if (iommu->iommu_ctxflush && + strbuf->strbuf_ctxflush) { + iopte_t *iopte; + struct iommu_map_table *tbl = &iommu->tbl; + + iopte = iommu->page_table + + ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT); + ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; + } + + /* Step 2: Kick data out of streaming buffers. 
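+	 * The loop below finds the last entry with a non-zero
+	 * dma_length (sgprv) so that only the span actually mapped
+	 * gets flushed.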
*/ + strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); + + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static void dma_4u_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction direction) +{ + struct iommu *iommu; + struct strbuf *strbuf; + unsigned long flags, ctx, npages, i; + struct scatterlist *sg, *sgprv; + u32 bus_addr; + + iommu = dev->archdata.iommu; + strbuf = dev->archdata.stc; + + if (!strbuf->strbuf_enabled) + return; + + spin_lock_irqsave(&iommu->lock, flags); + + /* Step 1: Record the context, if any. */ + ctx = 0; + if (iommu->iommu_ctxflush && + strbuf->strbuf_ctxflush) { + iopte_t *iopte; + struct iommu_map_table *tbl = &iommu->tbl; + + iopte = iommu->page_table + ((sglist[0].dma_address - + tbl->table_map_base) >> IO_PAGE_SHIFT); + ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; + } + + /* Step 2: Kick data out of streaming buffers. */ + bus_addr = sglist[0].dma_address & IO_PAGE_MASK; + sgprv = NULL; + for_each_sg(sglist, sg, nelems, i) { + if (sg->dma_length == 0) + break; + sgprv = sg; + } + + npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) + - bus_addr) >> IO_PAGE_SHIFT; + strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); + + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static int dma_4u_supported(struct device *dev, u64 device_mask) +{ + struct iommu *iommu = dev->archdata.iommu; + + if (ali_sound_dma_hack(dev, device_mask)) + return 1; + + if (device_mask < iommu->dma_addr_mask) + return 0; + return 1; +} + +static const struct dma_map_ops sun4u_dma_ops = { + .alloc = dma_4u_alloc_coherent, + .free = dma_4u_free_coherent, + .map_page = dma_4u_map_page, + .unmap_page = dma_4u_unmap_page, + .map_sg = dma_4u_map_sg, + .unmap_sg = dma_4u_unmap_sg, + .sync_single_for_cpu = dma_4u_sync_single_for_cpu, + .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, + .dma_supported = dma_4u_supported, +}; + +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; +EXPORT_SYMBOL(dma_ops); diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h new file mode 100644 index 000000000..d62ed9c56 --- /dev/null +++ b/arch/sparc/kernel/iommu_common.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations. + * + * Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net) + */ + +#ifndef _IOMMU_COMMON_H +#define _IOMMU_COMMON_H + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * These give mapping size of each iommu pte/tlb. + */ +#define IO_PAGE_SHIFT 13 +#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) +#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) +#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE) + +#define IO_TSB_ENTRIES (128*1024) +#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8) + +/* + * This is the hardwired shift in the iotlb tag/data parts. 
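+ * It matches IO_PAGE_SHIFT above: both describe the same 8K IOMMU
+ * page granule.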
+ */ +#define IOMMU_PAGE_SHIFT 13 + +#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG)))) + +static inline int is_span_boundary(unsigned long entry, + unsigned long shift, + unsigned long boundary_size, + struct scatterlist *outs, + struct scatterlist *sg) +{ + unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs); + int nr = iommu_num_pages(paddr, outs->dma_length + sg->length, + IO_PAGE_SIZE); + + return iommu_is_span_boundary(entry, nr, shift, boundary_size); +} + +#endif /* _IOMMU_COMMON_H */ diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c new file mode 100644 index 000000000..4e4f3d326 --- /dev/null +++ b/arch/sparc/kernel/ioport.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ioport.c: Simple io mapping allocator. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) + * + * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev. + * + * 2000/01/29 + * zait: as long as pci_alloc_consistent produces something addressable, + * things are ok. + * rth: no, it is relevant, because get_free_pages returns you a + * pointer into the big page mapping + * zait: so what? + * zait: remap_it_my_way(virt_to_phys(get_free_page())) + * Hmm + * Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())). + * So far so good. + * Now, driver calls pci_free_consistent(with result of + * remap_it_my_way()). + * How do you find the address to pass to free_pages()? + * zait: walk the page tables? It's only two or three level after all. + * zait: you have to walk them anyway to remove the mapping. + * Hmm + * Sounds reasonable + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* struct pci_dev */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); +static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, + unsigned long size, char *name); +static void _sparc_free_io(struct resource *res); + +static void register_proc_sparc_ioport(void); + +/* This points to the next to use virtual memory for DVMA mappings */ +static struct resource _sparc_dvma = { + .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1 +}; +/* This points to the start of I/O mappings, cluable from outside. */ +/*ext*/ struct resource sparc_iomap = { + .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1 +}; + +/* + * Our mini-allocator... + * Boy this is gross! We need it because we must map I/O for + * timers and interrupt controller before the kmalloc is available. + */ + +#define XNMLN 15 +#define XNRES 10 /* SS-10 uses 8 */ + +struct xresource { + struct resource xres; /* Must be first */ + int xflag; /* 1 == used */ + char xname[XNMLN+1]; +}; + +static struct xresource xresv[XNRES]; + +static struct xresource *xres_alloc(void) { + struct xresource *xrp; + int n; + + xrp = xresv; + for (n = 0; n < XNRES; n++) { + if (xrp->xflag == 0) { + xrp->xflag = 1; + return xrp; + } + xrp++; + } + return NULL; +} + +static void xres_free(struct xresource *xrp) { + xrp->xflag = 0; +} + +/* + * These are typically used in PCI drivers + * which are trying to be cross-platform. + * + * Bus type is always zero on IIep. 
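+ *
+ * Illustrative use from a hypothetical driver (not part of this
+ * file):
+ *
+ *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0), 0x100);
+ *
+ *	if (!regs)
+ *		return -ENOMEM;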
+ */ +void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + char name[14]; + + sprintf(name, "phys_%08x", (u32)offset); + return _sparc_alloc_io(0, (unsigned long)offset, size, name); +} +EXPORT_SYMBOL(ioremap); + +/* + * Complementary to ioremap(). + */ +void iounmap(volatile void __iomem *virtual) +{ + unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; + struct resource *res; + + /* + * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case. + * This probably warrants some sort of hashing. + */ + if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) { + printk("free_io/iounmap: cannot free %lx\n", vaddr); + return; + } + _sparc_free_io(res); + + if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) { + xres_free((struct xresource *)res); + } else { + kfree(res); + } +} +EXPORT_SYMBOL(iounmap); + +void __iomem *of_ioremap(struct resource *res, unsigned long offset, + unsigned long size, char *name) +{ + return _sparc_alloc_io(res->flags & 0xF, + res->start + offset, + size, name); +} +EXPORT_SYMBOL(of_ioremap); + +void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) +{ + iounmap(base); +} +EXPORT_SYMBOL(of_iounmap); + +/* + * Meat of mapping + */ +static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, + unsigned long size, char *name) +{ + static int printed_full; + struct xresource *xres; + struct resource *res; + char *tack; + int tlen; + void __iomem *va; /* P3 diag */ + + if (name == NULL) name = "???"; + + if ((xres = xres_alloc()) != NULL) { + tack = xres->xname; + res = &xres->xres; + } else { + if (!printed_full) { + printk("ioremap: done with statics, switching to malloc\n"); + printed_full = 1; + } + tlen = strlen(name); + tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL); + if (tack == NULL) return NULL; + memset(tack, 0, sizeof(struct resource)); + res = (struct resource *) tack; + tack += sizeof (struct resource); + } + + strlcpy(tack, name, XNMLN+1); + res->name = tack; + + va = _sparc_ioremap(res, busno, phys, size); + /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */ + return va; +} + +/* + */ +static void __iomem * +_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) +{ + unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); + + if (allocate_resource(&sparc_iomap, res, + (offset + sz + PAGE_SIZE-1) & PAGE_MASK, + sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) { + /* Usually we cannot see printks in this case. */ + prom_printf("alloc_io_res(%s): cannot occupy\n", + (res->name != NULL)? res->name: "???"); + prom_halt(); + } + + pa &= PAGE_MASK; + srmmu_mapiorange(bus, pa, res->start, resource_size(res)); + + return (void __iomem *)(unsigned long)(res->start + offset); +} + +/* + * Complementary to _sparc_ioremap(). 
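+ * _sparc_ioremap() rounded the allocation up to whole pages, so the
+ * sub-page size check below should never fire for a region that came
+ * from this allocator.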
+ */ +static void _sparc_free_io(struct resource *res) +{ + unsigned long plen; + + plen = resource_size(res); + BUG_ON((plen & (PAGE_SIZE-1)) != 0); + srmmu_unmapiorange(res->start, plen); + release_resource(res); +} + +unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len) +{ + struct resource *res; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return 0; + res->name = dev->of_node->full_name; + + if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start, + _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { + printk("%s: cannot occupy 0x%zx", __func__, len); + kfree(res); + return 0; + } + + return res->start; +} + +bool sparc_dma_free_resource(void *cpu_addr, size_t size) +{ + unsigned long addr = (unsigned long)cpu_addr; + struct resource *res; + + res = lookup_resource(&_sparc_dvma, addr); + if (!res) { + printk("%s: cannot free %p\n", __func__, cpu_addr); + return false; + } + + if ((addr & (PAGE_SIZE - 1)) != 0) { + printk("%s: unaligned va %p\n", __func__, cpu_addr); + return false; + } + + size = PAGE_ALIGN(size); + if (resource_size(res) != size) { + printk("%s: region 0x%lx asked 0x%zx\n", + __func__, (long)resource_size(res), size); + return false; + } + + release_resource(res); + kfree(res); + return true; +} + +#ifdef CONFIG_SBUS + +void sbus_set_sbus64(struct device *dev, int x) +{ + printk("sbus_set_sbus64: unsupported\n"); +} +EXPORT_SYMBOL(sbus_set_sbus64); + +static int __init sparc_register_ioport(void) +{ + register_proc_sparc_ioport(); + + return 0; +} + +arch_initcall(sparc_register_ioport); + +#endif /* CONFIG_SBUS */ + +/* + * IIep is write-through, not flushing on cpu to device transfer. + * + * On LEON systems without cache snooping, the entire D-CACHE must be flushed to + * make DMA to cacheable memory coherent. + */ +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + if (dir != DMA_TO_DEVICE && + sparc_cpu_model == sparc_leon && + !sparc_leon3_snooping_enabled()) + leon_flush_dcache_all(); +} + +#ifdef CONFIG_PROC_FS + +static int sparc_io_proc_show(struct seq_file *m, void *v) +{ + struct resource *root = m->private, *r; + const char *nm; + + for (r = root->child; r != NULL; r = r->sibling) { + if ((nm = r->name) == NULL) nm = "???"; + seq_printf(m, "%016llx-%016llx: %s\n", + (unsigned long long)r->start, + (unsigned long long)r->end, nm); + } + + return 0; +} +#endif /* CONFIG_PROC_FS */ + +static void register_proc_sparc_ioport(void) +{ +#ifdef CONFIG_PROC_FS + proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show, + &sparc_iomap); + proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show, + &_sparc_dvma); +#endif +} diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h new file mode 100644 index 000000000..b02026ad6 --- /dev/null +++ b/arch/sparc/kernel/irq.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +#include + +struct irq_bucket { + struct irq_bucket *next; + unsigned int real_irq; + unsigned int irq; + unsigned int pil; +}; + +#define SUN4M_HARD_INT(x) (0x000000001 << (x)) +#define SUN4M_SOFT_INT(x) (0x000010000 << (x)) + +#define SUN4D_MAX_BOARD 10 +#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5) + +/* Map between the irq identifier used in hw to the + * irq_bucket. The map is sufficient large to hold + * the sun4d hw identifiers. 
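+ * (SUN4D_MAX_IRQ works out to (SUN4D_MAX_BOARD + 2) << 5, i.e.
+ * 12 * 32 = 384 entries.)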
+ */ +extern struct irq_bucket *irq_map[SUN4D_MAX_IRQ]; + + +/* sun4m specific type definitions */ + +/* This maps direct to CPU specific interrupt registers */ +struct sun4m_irq_percpu { + u32 pending; + u32 clear; + u32 set; +}; + +/* This maps direct to global interrupt registers */ +struct sun4m_irq_global { + u32 pending; + u32 mask; + u32 mask_clear; + u32 mask_set; + u32 interrupt_target; +}; + +extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; +extern struct sun4m_irq_global __iomem *sun4m_irq_global; + +/* The following definitions describe the individual platform features: */ +#define FEAT_L10_CLOCKSOURCE (1 << 0) /* L10 timer is used as a clocksource */ +#define FEAT_L10_CLOCKEVENT (1 << 1) /* L10 timer is used as a clockevent */ +#define FEAT_L14_ONESHOT (1 << 2) /* L14 timer clockevent can oneshot */ + +/* + * Platform specific configuration + * The individual platforms assign their platform + * specifics in their init functions. + */ +struct sparc_config { + void (*init_timers)(void); + unsigned int (*build_device_irq)(struct platform_device *op, + unsigned int real_irq); + + /* generic clockevent features - see FEAT_* above */ + int features; + + /* clock rate used for clock event timer */ + int clock_rate; + + /* one period for clock source timer */ + unsigned int cs_period; + + /* function to obtain offsett for cs period */ + unsigned int (*get_cycles_offset)(void); + + void (*clear_clock_irq)(void); + void (*load_profile_irq)(int cpu, unsigned int limit); +}; +extern struct sparc_config sparc_config; + +unsigned int irq_alloc(unsigned int real_irq, unsigned int pil); +void irq_link(unsigned int irq); +void irq_unlink(unsigned int irq); +void handler_irq(unsigned int pil, struct pt_regs *regs); + +unsigned long leon_get_irqmask(unsigned int irq); + +/* irq_32.c */ +void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs); + +/* sun4m_irq.c */ +void sun4m_nmi(struct pt_regs *regs); + +/* sun4d_irq.c */ +void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs); + +#ifdef CONFIG_SMP + +/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */ +#define SUN4D_IPI_IRQ 13 + +void sun4d_ipi_interrupt(void); + +#endif diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c new file mode 100644 index 000000000..e8452be51 --- /dev/null +++ b/arch/sparc/kernel/irq_32.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Interrupt request handling routines. On the + * Sparc the IRQs are basically 'cast in stone' + * and you are supposed to probe the prom's device + * node trees to find out who's got which IRQ. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1995,2002 Pete A. 
Zaitcev (zaitcev@yahoo.com) + * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) + * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org) + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +/* platform specific irq setup */ +struct sparc_config sparc_config; + +unsigned long arch_local_irq_save(void) +{ + unsigned long retval; + unsigned long tmp; + + __asm__ __volatile__( + "rd %%psr, %0\n\t" + "or %0, %2, %1\n\t" + "wr %1, 0, %%psr\n\t" + "nop; nop; nop\n" + : "=&r" (retval), "=r" (tmp) + : "i" (PSR_PIL) + : "memory"); + + return retval; +} +EXPORT_SYMBOL(arch_local_irq_save); + +void arch_local_irq_enable(void) +{ + unsigned long tmp; + + __asm__ __volatile__( + "rd %%psr, %0\n\t" + "andn %0, %1, %0\n\t" + "wr %0, 0, %%psr\n\t" + "nop; nop; nop\n" + : "=&r" (tmp) + : "i" (PSR_PIL) + : "memory"); +} +EXPORT_SYMBOL(arch_local_irq_enable); + +void arch_local_irq_restore(unsigned long old_psr) +{ + unsigned long tmp; + + __asm__ __volatile__( + "rd %%psr, %0\n\t" + "and %2, %1, %2\n\t" + "andn %0, %1, %0\n\t" + "wr %0, %2, %%psr\n\t" + "nop; nop; nop\n" + : "=&r" (tmp) + : "i" (PSR_PIL), "r" (old_psr) + : "memory"); +} +EXPORT_SYMBOL(arch_local_irq_restore); + +/* + * Dave Redman (djhr@tadpole.co.uk) + * + * IRQ numbers.. These are no longer restricted to 15.. + * + * this is done to enable SBUS cards and onboard IO to be masked + * correctly. using the interrupt level isn't good enough. + * + * For example: + * A device interrupting at sbus level6 and the Floppy both come in + * at IRQ11, but enabling and disabling them requires writing to + * different bits in the SLAVIO/SEC. + * + * As a result of these changes sun4m machines could now support + * directed CPU interrupts using the existing enable/disable irq code + * with tweaks. + * + * Sun4d complicates things even further. IRQ numbers are arbitrary + * 32-bit values in that case. Since this is similar to sparc64, + * we adopt a virtual IRQ numbering scheme as is done there. + * Virutal interrupt numbers are allocated by build_irq(). So NR_IRQS + * just becomes a limit of how many interrupt sources we can handle in + * a single system. Even fully loaded SS2000 machines top off at + * about 32 interrupt sources or so, therefore a NR_IRQS value of 64 + * is more than enough. + * + * We keep a map of per-PIL enable interrupts. These get wired + * up via the irq_chip->startup() method which gets invoked by + * the generic IRQ layer during request_irq(). + */ + + +/* Table of allocated irqs. Unused entries has irq == 0 */ +static struct irq_bucket irq_table[NR_IRQS]; +/* Protect access to irq_table */ +static DEFINE_SPINLOCK(irq_table_lock); + +/* Map between the irq identifier used in hw to the irq_bucket. 
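+ * Several buckets may hang off one PIL: irq_link() pushes a bucket
+ * onto the irq_map[pil] chain and handler_irq() walks that chain,
+ * handing each linked entry to generic_handle_irq().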
*/ +struct irq_bucket *irq_map[SUN4D_MAX_IRQ]; +/* Protect access to irq_map */ +static DEFINE_SPINLOCK(irq_map_lock); + +/* Allocate a new irq from the irq_table */ +unsigned int irq_alloc(unsigned int real_irq, unsigned int pil) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&irq_table_lock, flags); + for (i = 1; i < NR_IRQS; i++) { + if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil) + goto found; + } + + for (i = 1; i < NR_IRQS; i++) { + if (!irq_table[i].irq) + break; + } + + if (i < NR_IRQS) { + irq_table[i].real_irq = real_irq; + irq_table[i].irq = i; + irq_table[i].pil = pil; + } else { + printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); + i = 0; + } +found: + spin_unlock_irqrestore(&irq_table_lock, flags); + + return i; +} + +/* Based on a single pil handler_irq may need to call several + * interrupt handlers. Use irq_map as entry to irq_table, + * and let each entry in irq_table point to the next entry. + */ +void irq_link(unsigned int irq) +{ + struct irq_bucket *p; + unsigned long flags; + unsigned int pil; + + BUG_ON(irq >= NR_IRQS); + + spin_lock_irqsave(&irq_map_lock, flags); + + p = &irq_table[irq]; + pil = p->pil; + BUG_ON(pil >= SUN4D_MAX_IRQ); + p->next = irq_map[pil]; + irq_map[pil] = p; + + spin_unlock_irqrestore(&irq_map_lock, flags); +} + +void irq_unlink(unsigned int irq) +{ + struct irq_bucket *p, **pnext; + unsigned long flags; + + BUG_ON(irq >= NR_IRQS); + + spin_lock_irqsave(&irq_map_lock, flags); + + p = &irq_table[irq]; + BUG_ON(p->pil >= SUN4D_MAX_IRQ); + pnext = &irq_map[p->pil]; + while (*pnext != p) + pnext = &(*pnext)->next; + *pnext = p->next; + + spin_unlock_irqrestore(&irq_map_lock, flags); +} + + +/* /proc/interrupts printing */ +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + +#ifdef CONFIG_SMP + seq_printf(p, "RES: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).irq_resched_count); + seq_printf(p, " IPI rescheduling interrupts\n"); + seq_printf(p, "CAL: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).irq_call_count); + seq_printf(p, " IPI function call interrupts\n"); +#endif + seq_printf(p, "NMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).counter); + seq_printf(p, " Non-maskable interrupts\n"); + return 0; +} + +void handler_irq(unsigned int pil, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + struct irq_bucket *p; + + BUG_ON(pil > 15); + old_regs = set_irq_regs(regs); + irq_enter(); + + p = irq_map[pil]; + while (p) { + struct irq_bucket *next = p->next; + + generic_handle_irq(p->irq); + p = next; + } + irq_exit(); + set_irq_regs(old_regs); +} + +#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) +static unsigned int floppy_irq; + +int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler) +{ + unsigned int cpu_irq; + int err; + + + err = request_irq(irq, irq_handler, 0, "floppy", NULL); + if (err) + return -1; + + /* Save for later use in floppy interrupt handler */ + floppy_irq = irq; + + cpu_irq = (irq & (NR_IRQS - 1)); + + /* Dork with trap table if we get this far. 
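+ * INSTANTIATE() rewrites the four instructions of the floppy's
+ * trap-table slot so the trap vectors straight into the low-level
+ * floppy_hardint assembler handler instead of the generic IRQ entry.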
*/ +#define INSTANTIATE(table) \ + table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ + table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ + SPARC_BRANCH((unsigned long) floppy_hardint, \ + (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ + table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ + table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; + + INSTANTIATE(sparc_ttable) + +#if defined CONFIG_SMP + if (sparc_cpu_model != sparc_leon) { + struct tt_entry *trap_table; + + trap_table = &trapbase_cpu1; + INSTANTIATE(trap_table) + trap_table = &trapbase_cpu2; + INSTANTIATE(trap_table) + trap_table = &trapbase_cpu3; + INSTANTIATE(trap_table) + } +#endif +#undef INSTANTIATE + /* + * XXX Correct thing whould be to flush only I- and D-cache lines + * which contain the handler in question. But as of time of the + * writing we have no CPU-neutral interface to fine-grained flushes. + */ + flush_cache_all(); + return 0; +} +EXPORT_SYMBOL(sparc_floppy_request_irq); + +/* + * These variables are used to access state from the assembler + * interrupt handler, floppy_hardint, so we cannot put these in + * the floppy driver image because that would not work in the + * modular case. + */ +volatile unsigned char *fdc_status; +EXPORT_SYMBOL(fdc_status); + +char *pdma_vaddr; +EXPORT_SYMBOL(pdma_vaddr); + +unsigned long pdma_size; +EXPORT_SYMBOL(pdma_size); + +volatile int doing_pdma; +EXPORT_SYMBOL(doing_pdma); + +char *pdma_base; +EXPORT_SYMBOL(pdma_base); + +unsigned long pdma_areasize; +EXPORT_SYMBOL(pdma_areasize); + +/* Use the generic irq support to call floppy_interrupt + * which was setup using request_irq() in sparc_floppy_request_irq(). + * We only have one floppy interrupt so we do not need to check + * for additional handlers being wired up by irq_link() + */ +void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + old_regs = set_irq_regs(regs); + irq_enter(); + generic_handle_irq(floppy_irq); + irq_exit(); + set_irq_regs(old_regs); +} +#endif + +/* djhr + * This could probably be made indirect too and assigned in the CPU + * bits of the code. That would be much nicer I think and would also + * fit in with the idea of being able to tune your kernel for your machine + * by removing unrequired machine and device support. + * + */ + +void __init init_IRQ(void) +{ + switch (sparc_cpu_model) { + case sun4m: + pcic_probe(); + if (pcic_present()) + sun4m_pci_init_IRQ(); + else + sun4m_init_IRQ(); + break; + + case sun4d: + sun4d_init_IRQ(); + break; + + case sparc_leon: + leon_init_IRQ(); + break; + + default: + prom_printf("Cannot initialize IRQs on this Sun machine..."); + break; + } +} + diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c new file mode 100644 index 000000000..72da2e10e --- /dev/null +++ b/arch/sparc/kernel/irq_64.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* irq.c: UltraSparc IRQ handling/init/registry. + * + * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1998 Eddie C. 
Dost (ecd@skynet.be) + * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "entry.h" +#include "cpumap.h" +#include "kstack.h" + +struct ino_bucket *ivector_table; +unsigned long ivector_table_pa; + +/* On several sun4u processors, it is illegal to mix bypass and + * non-bypass accesses. Therefore we access all INO buckets + * using bypass accesses only. + */ +static unsigned long bucket_get_chain_pa(unsigned long bucket_pa) +{ + unsigned long ret; + + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=&r" (ret) + : "r" (bucket_pa + + offsetof(struct ino_bucket, + __irq_chain_pa)), + "i" (ASI_PHYS_USE_EC)); + + return ret; +} + +static void bucket_clear_chain_pa(unsigned long bucket_pa) +{ + __asm__ __volatile__("stxa %%g0, [%0] %1" + : /* no outputs */ + : "r" (bucket_pa + + offsetof(struct ino_bucket, + __irq_chain_pa)), + "i" (ASI_PHYS_USE_EC)); +} + +static unsigned int bucket_get_irq(unsigned long bucket_pa) +{ + unsigned int ret; + + __asm__ __volatile__("lduwa [%1] %2, %0" + : "=&r" (ret) + : "r" (bucket_pa + + offsetof(struct ino_bucket, + __irq)), + "i" (ASI_PHYS_USE_EC)); + + return ret; +} + +static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq) +{ + __asm__ __volatile__("stwa %0, [%1] %2" + : /* no outputs */ + : "r" (irq), + "r" (bucket_pa + + offsetof(struct ino_bucket, + __irq)), + "i" (ASI_PHYS_USE_EC)); +} + +#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa) + +static unsigned long hvirq_major __initdata; +static int __init early_hvirq_major(char *p) +{ + int rc = kstrtoul(p, 10, &hvirq_major); + + return rc; +} +early_param("hvirq", early_hvirq_major); + +static int hv_irq_version; + +/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie + * based interfaces, but: + * + * 1) Several OSs, Solaris and Linux included, use them even when only + * negotiating version 1.0 (or failing to negotiate at all). So the + * hypervisor has a workaround that provides the VIRQ interfaces even + * when only verion 1.0 of the API is in use. + * + * 2) Second, and more importantly, with major version 2.0 these VIRQ + * interfaces only were actually hooked up for LDC interrupts, even + * though the Hypervisor specification clearly stated: + * + * The new interrupt API functions will be available to a guest + * when it negotiates version 2.0 in the interrupt API group 0x2. When + * a guest negotiates version 2.0, all interrupt sources will only + * support using the cookie interface, and any attempt to use the + * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the + * ENOTSUPPORTED error being returned. + * + * with an emphasis on "all interrupt sources". + * + * To correct this, major version 3.0 was created which does actually + * support VIRQs for all interrupt sources (not just LDC devices). So + * if we want to move completely over the cookie based VIRQs we must + * negotiate major version 3.0 or later of HV_GRP_INTR. 
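+ *
+ * In short: once major version 3 or later is negotiated, every
+ * interrupt source can use cookie based VIRQs, and that is exactly
+ * the test sun4v_cookie_only_virqs() below encodes.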
+ */ +static bool sun4v_cookie_only_virqs(void) +{ + if (hv_irq_version >= 3) + return true; + return false; +} + +static void __init irq_init_hv(void) +{ + unsigned long hv_error, major, minor = 0; + + if (tlb_type != hypervisor) + return; + + if (hvirq_major) + major = hvirq_major; + else + major = 3; + + hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor); + if (!hv_error) + hv_irq_version = major; + else + hv_irq_version = 1; + + pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n", + hv_irq_version, + sun4v_cookie_only_virqs() ? "enabled" : "disabled"); +} + +/* This function is for the timer interrupt.*/ +int __init arch_probe_nr_irqs(void) +{ + return 1; +} + +#define DEFAULT_NUM_IVECS (0xfffU) +static unsigned int nr_ivec = DEFAULT_NUM_IVECS; +#define NUM_IVECS (nr_ivec) + +static unsigned int __init size_nr_ivec(void) +{ + if (tlb_type == hypervisor) { + switch (sun4v_chip_type) { + /* Athena's devhandle|devino is large.*/ + case SUN4V_CHIP_SPARC64X: + nr_ivec = 0xffff; + break; + } + } + return nr_ivec; +} + +struct irq_handler_data { + union { + struct { + unsigned int dev_handle; + unsigned int dev_ino; + }; + unsigned long sysino; + }; + struct ino_bucket bucket; + unsigned long iclr; + unsigned long imap; +}; + +static inline unsigned int irq_data_to_handle(struct irq_data *data) +{ + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); + + return ihd->dev_handle; +} + +static inline unsigned int irq_data_to_ino(struct irq_data *data) +{ + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); + + return ihd->dev_ino; +} + +static inline unsigned long irq_data_to_sysino(struct irq_data *data) +{ + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); + + return ihd->sysino; +} + +void irq_free(unsigned int irq) +{ + void *data = irq_get_handler_data(irq); + + kfree(data); + irq_set_handler_data(irq, NULL); + irq_free_descs(irq, 1); +} + +unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino) +{ + int irq; + + irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL); + if (irq <= 0) + goto out; + + return irq; +out: + return 0; +} + +static unsigned int cookie_exists(u32 devhandle, unsigned int devino) +{ + unsigned long hv_err, cookie; + struct ino_bucket *bucket; + unsigned int irq = 0U; + + hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie); + if (hv_err) { + pr_err("HV get cookie failed hv_err = %ld\n", hv_err); + goto out; + } + + if (cookie & ((1UL << 63UL))) { + cookie = ~cookie; + bucket = (struct ino_bucket *) __va(cookie); + irq = bucket->__irq; + } +out: + return irq; +} + +static unsigned int sysino_exists(u32 devhandle, unsigned int devino) +{ + unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); + struct ino_bucket *bucket; + unsigned int irq; + + bucket = &ivector_table[sysino]; + irq = bucket_get_irq(__pa(bucket)); + + return irq; +} + +void ack_bad_irq(unsigned int irq) +{ + pr_crit("BAD IRQ ack %d\n", irq); +} + +void irq_install_pre_handler(int irq, + void (*func)(unsigned int, void *, void *), + void *arg1, void *arg2) +{ + pr_warn("IRQ pre handler NOT supported.\n"); +} + +/* + * /proc/interrupts printing: + */ +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "NMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).__nmi_count); + seq_printf(p, " Non-maskable interrupts\n"); + return 0; +} + +static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) +{ + unsigned int tid; + + if 
(this_is_starfire) { + tid = starfire_translate(imap, cpuid); + tid <<= IMAP_TID_SHIFT; + tid &= IMAP_TID_UPA; + } else { + if (tlb_type == cheetah || tlb_type == cheetah_plus) { + unsigned long ver; + + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID) { + tid = cpuid << IMAP_TID_SHIFT; + tid &= IMAP_TID_JBUS; + } else { + unsigned int a = cpuid & 0x1f; + unsigned int n = (cpuid >> 5) & 0x1f; + + tid = ((a << IMAP_AID_SHIFT) | + (n << IMAP_NID_SHIFT)); + tid &= (IMAP_AID_SAFARI | + IMAP_NID_SAFARI); + } + } else { + tid = cpuid << IMAP_TID_SHIFT; + tid &= IMAP_TID_UPA; + } + } + + return tid; +} + +#ifdef CONFIG_SMP +static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) +{ + cpumask_t mask; + int cpuid; + + cpumask_copy(&mask, affinity); + if (cpumask_equal(&mask, cpu_online_mask)) { + cpuid = map_to_cpu(irq); + } else { + cpumask_t tmp; + + cpumask_and(&tmp, cpu_online_mask, &mask); + cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp); + } + + return cpuid; +} +#else +#define irq_choose_cpu(irq, affinity) \ + real_hard_smp_processor_id() +#endif + +static void sun4u_irq_enable(struct irq_data *data) +{ + struct irq_handler_data *handler_data; + + handler_data = irq_data_get_irq_handler_data(data); + if (likely(handler_data)) { + unsigned long cpuid, imap, val; + unsigned int tid; + + cpuid = irq_choose_cpu(data->irq, + irq_data_get_affinity_mask(data)); + imap = handler_data->imap; + + tid = sun4u_compute_tid(imap, cpuid); + + val = upa_readq(imap); + val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | + IMAP_AID_SAFARI | IMAP_NID_SAFARI); + val |= tid | IMAP_VALID; + upa_writeq(val, imap); + upa_writeq(ICLR_IDLE, handler_data->iclr); + } +} + +static int sun4u_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct irq_handler_data *handler_data; + + handler_data = irq_data_get_irq_handler_data(data); + if (likely(handler_data)) { + unsigned long cpuid, imap, val; + unsigned int tid; + + cpuid = irq_choose_cpu(data->irq, mask); + imap = handler_data->imap; + + tid = sun4u_compute_tid(imap, cpuid); + + val = upa_readq(imap); + val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | + IMAP_AID_SAFARI | IMAP_NID_SAFARI); + val |= tid | IMAP_VALID; + upa_writeq(val, imap); + upa_writeq(ICLR_IDLE, handler_data->iclr); + } + + return 0; +} + +/* Don't do anything. The desc->status check for IRQ_DISABLED in + * handler_irq() will skip the handler call and that will leave the + * interrupt in the sent state. The next ->enable() call will hit the + * ICLR register to reset the state machine. + * + * This scheme is necessary, instead of clearing the Valid bit in the + * IMAP register, to handle the case of IMAP registers being shared by + * multiple INOs (and thus ICLR registers). Since we use a different + * virtual IRQ for each shared IMAP instance, the generic code thinks + * there is only one user so it prematurely calls ->disable() on + * free_irq(). + * + * We have to provide an explicit ->disable() method instead of using + * NULL to get the default. The reason is that if the generic code + * sees that, it also hooks up a default ->shutdown method which + * invokes ->mask() which we do not want. See irq_chip_set_defaults(). 
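+ * That is why sun4u_irq_disable() below is deliberately empty.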
+ */ +static void sun4u_irq_disable(struct irq_data *data) +{ +} + +static void sun4u_irq_eoi(struct irq_data *data) +{ + struct irq_handler_data *handler_data; + + handler_data = irq_data_get_irq_handler_data(data); + if (likely(handler_data)) + upa_writeq(ICLR_IDLE, handler_data->iclr); +} + +static void sun4v_irq_enable(struct irq_data *data) +{ + unsigned long cpuid = irq_choose_cpu(data->irq, + irq_data_get_affinity_mask(data)); + unsigned int ino = irq_data_to_sysino(data); + int err; + + err = sun4v_intr_settarget(ino, cpuid); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " + "err(%d)\n", ino, cpuid, err); + err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_setstate(%x): " + "err(%d)\n", ino, err); + err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n", + ino, err); +} + +static int sun4v_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + unsigned long cpuid = irq_choose_cpu(data->irq, mask); + unsigned int ino = irq_data_to_sysino(data); + int err; + + err = sun4v_intr_settarget(ino, cpuid); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " + "err(%d)\n", ino, cpuid, err); + + return 0; +} + +static void sun4v_irq_disable(struct irq_data *data) +{ + unsigned int ino = irq_data_to_sysino(data); + int err; + + err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_setenabled(%x): " + "err(%d)\n", ino, err); +} + +static void sun4v_irq_eoi(struct irq_data *data) +{ + unsigned int ino = irq_data_to_sysino(data); + int err; + + err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_intr_setstate(%x): " + "err(%d)\n", ino, err); +} + +static void sun4v_virq_enable(struct irq_data *data) +{ + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + unsigned long cpuid; + int err; + + cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data)); + + err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " + "err(%d)\n", + dev_handle, dev_ino, cpuid, err); + err = sun4v_vintr_set_state(dev_handle, dev_ino, + HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," + "HV_INTR_STATE_IDLE): err(%d)\n", + dev_handle, dev_ino, err); + err = sun4v_vintr_set_valid(dev_handle, dev_ino, + HV_INTR_ENABLED); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," + "HV_INTR_ENABLED): err(%d)\n", + dev_handle, dev_ino, err); +} + +static int sun4v_virt_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + unsigned long cpuid; + int err; + + cpuid = irq_choose_cpu(data->irq, mask); + + err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " + "err(%d)\n", + dev_handle, dev_ino, cpuid, err); + + return 0; +} + +static void sun4v_virq_disable(struct irq_data *data) +{ + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + int err; + + + err = sun4v_vintr_set_valid(dev_handle, dev_ino, + HV_INTR_DISABLED); + if (err != HV_EOK) + printk(KERN_ERR 
"sun4v_vintr_set_state(%lx,%lx," + "HV_INTR_DISABLED): err(%d)\n", + dev_handle, dev_ino, err); +} + +static void sun4v_virq_eoi(struct irq_data *data) +{ + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + int err; + + err = sun4v_vintr_set_state(dev_handle, dev_ino, + HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," + "HV_INTR_STATE_IDLE): err(%d)\n", + dev_handle, dev_ino, err); +} + +static struct irq_chip sun4u_irq = { + .name = "sun4u", + .irq_enable = sun4u_irq_enable, + .irq_disable = sun4u_irq_disable, + .irq_eoi = sun4u_irq_eoi, + .irq_set_affinity = sun4u_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, +}; + +static struct irq_chip sun4v_irq = { + .name = "sun4v", + .irq_enable = sun4v_irq_enable, + .irq_disable = sun4v_irq_disable, + .irq_eoi = sun4v_irq_eoi, + .irq_set_affinity = sun4v_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, +}; + +static struct irq_chip sun4v_virq = { + .name = "vsun4v", + .irq_enable = sun4v_virq_enable, + .irq_disable = sun4v_virq_disable, + .irq_eoi = sun4v_virq_eoi, + .irq_set_affinity = sun4v_virt_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, +}; + +unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) +{ + struct irq_handler_data *handler_data; + struct ino_bucket *bucket; + unsigned int irq; + int ino; + + BUG_ON(tlb_type == hypervisor); + + ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; + bucket = &ivector_table[ino]; + irq = bucket_get_irq(__pa(bucket)); + if (!irq) { + irq = irq_alloc(0, ino); + bucket_set_irq(__pa(bucket), irq); + irq_set_chip_and_handler_name(irq, &sun4u_irq, + handle_fasteoi_irq, "IVEC"); + } + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto out; + + handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); + prom_halt(); + } + irq_set_handler_data(irq, handler_data); + + handler_data->imap = imap; + handler_data->iclr = iclr; + +out: + return irq; +} + +static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino, + void (*handler_data_init)(struct irq_handler_data *data, + u32 devhandle, unsigned int devino), + struct irq_chip *chip) +{ + struct irq_handler_data *data; + unsigned int irq; + + irq = irq_alloc(devhandle, devino); + if (!irq) + goto out; + + data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!data)) { + pr_err("IRQ handler data allocation failed.\n"); + irq_free(irq); + irq = 0; + goto out; + } + + irq_set_handler_data(irq, data); + handler_data_init(data, devhandle, devino); + irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC"); + data->imap = ~0UL; + data->iclr = ~0UL; +out: + return irq; +} + +static unsigned long cookie_assign(unsigned int irq, u32 devhandle, + unsigned int devino) +{ + struct irq_handler_data *ihd = irq_get_handler_data(irq); + unsigned long hv_error, cookie; + + /* handler_irq needs to find the irq. cookie is seen signed in + * sun4v_dev_mondo and treated as a non ivector_table delivery. 
+ */ + ihd->bucket.__irq = irq; + cookie = ~__pa(&ihd->bucket); + + hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie); + if (hv_error) + pr_err("HV vintr set cookie failed = %ld\n", hv_error); + + return hv_error; +} + +static void cookie_handler_data(struct irq_handler_data *data, + u32 devhandle, unsigned int devino) +{ + data->dev_handle = devhandle; + data->dev_ino = devino; +} + +static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino, + struct irq_chip *chip) +{ + unsigned long hv_error; + unsigned int irq; + + irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip); + + hv_error = cookie_assign(irq, devhandle, devino); + if (hv_error) { + irq_free(irq); + irq = 0; + } + + return irq; +} + +static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino) +{ + unsigned int irq; + + irq = cookie_exists(devhandle, devino); + if (irq) + goto out; + + irq = cookie_build_irq(devhandle, devino, &sun4v_virq); + +out: + return irq; +} + +static void sysino_set_bucket(unsigned int irq) +{ + struct irq_handler_data *ihd = irq_get_handler_data(irq); + struct ino_bucket *bucket; + unsigned long sysino; + + sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino); + BUG_ON(sysino >= nr_ivec); + bucket = &ivector_table[sysino]; + bucket_set_irq(__pa(bucket), irq); +} + +static void sysino_handler_data(struct irq_handler_data *data, + u32 devhandle, unsigned int devino) +{ + unsigned long sysino; + + sysino = sun4v_devino_to_sysino(devhandle, devino); + data->sysino = sysino; +} + +static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino, + struct irq_chip *chip) +{ + unsigned int irq; + + irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip); + if (!irq) + goto out; + + sysino_set_bucket(irq); +out: + return irq; +} + +static int sun4v_build_sysino(u32 devhandle, unsigned int devino) +{ + int irq; + + irq = sysino_exists(devhandle, devino); + if (irq) + goto out; + + irq = sysino_build_irq(devhandle, devino, &sun4v_irq); +out: + return irq; +} + +unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) +{ + unsigned int irq; + + if (sun4v_cookie_only_virqs()) + irq = sun4v_build_cookie(devhandle, devino); + else + irq = sun4v_build_sysino(devhandle, devino); + + return irq; +} + +unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) +{ + int irq; + + irq = cookie_build_irq(devhandle, devino, &sun4v_virq); + if (!irq) + goto out; + + /* This is borrowed from the original function. + */ + irq_set_status_flags(irq, IRQ_NOAUTOEN); + +out: + return irq; +} + +void *hardirq_stack[NR_CPUS]; +void *softirq_stack[NR_CPUS]; + +void __irq_entry handler_irq(int pil, struct pt_regs *regs) +{ + unsigned long pstate, bucket_pa; + struct pt_regs *old_regs; + void *orig_sp; + + clear_softint(1 << pil); + + old_regs = set_irq_regs(regs); + irq_enter(); + + /* Grab an atomic snapshot of the pending IVECs. 
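+ * PSTATE_IE is cleared around the ldx/stx pair so the per-cpu
+ * worklist head is fetched and zeroed without a window in which a
+ * newly arriving vector interrupt could be queued and then lost.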
*/ + __asm__ __volatile__("rdpr %%pstate, %0\n\t" + "wrpr %0, %3, %%pstate\n\t" + "ldx [%2], %1\n\t" + "stx %%g0, [%2]\n\t" + "wrpr %0, 0x0, %%pstate\n\t" + : "=&r" (pstate), "=&r" (bucket_pa) + : "r" (irq_work_pa(smp_processor_id())), + "i" (PSTATE_IE) + : "memory"); + + orig_sp = set_hardirq_stack(); + + while (bucket_pa) { + unsigned long next_pa; + unsigned int irq; + + next_pa = bucket_get_chain_pa(bucket_pa); + irq = bucket_get_irq(bucket_pa); + bucket_clear_chain_pa(bucket_pa); + + generic_handle_irq(irq); + + bucket_pa = next_pa; + } + + restore_hardirq_stack(orig_sp); + + irq_exit(); + set_irq_regs(old_regs); +} + +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK +void do_softirq_own_stack(void) +{ + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; + + sp += THREAD_SIZE - 192 - STACK_BIAS; + + __asm__ __volatile__("mov %%sp, %0\n\t" + "mov %1, %%sp" + : "=&r" (orig_sp) + : "r" (sp)); + __do_softirq(); + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); +} +#endif + +#ifdef CONFIG_HOTPLUG_CPU +void fixup_irqs(void) +{ + unsigned int irq; + + for (irq = 0; irq < NR_IRQS; irq++) { + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data; + unsigned long flags; + + if (!desc) + continue; + data = irq_desc_get_irq_data(desc); + raw_spin_lock_irqsave(&desc->lock, flags); + if (desc->action && !irqd_is_per_cpu(data)) { + if (data->chip->irq_set_affinity) + data->chip->irq_set_affinity(data, + irq_data_get_affinity_mask(data), + false); + } + raw_spin_unlock_irqrestore(&desc->lock, flags); + } + + tick_ops->disable_irq(); +} +#endif + +struct sun5_timer { + u64 count0; + u64 limit0; + u64 count1; + u64 limit1; +}; + +static struct sun5_timer *prom_timers; +static u64 prom_limit0, prom_limit1; + +static void map_prom_timers(void) +{ + struct device_node *dp; + const unsigned int *addr; + + /* PROM timer node hangs out in the top level of device siblings... */ + dp = of_find_node_by_path("/"); + dp = dp->child; + while (dp) { + if (of_node_name_eq(dp, "counter-timer")) + break; + dp = dp->sibling; + } + + /* Assume if node is not present, PROM uses different tick mechanism + * which we should not care about. + */ + if (!dp) { + prom_timers = (struct sun5_timer *) 0; + return; + } + + /* If PROM is really using this, it must be mapped by him. */ + addr = of_get_property(dp, "address", NULL); + if (!addr) { + prom_printf("PROM does not have timer mapped, trying to continue.\n"); + prom_timers = (struct sun5_timer *) 0; + return; + } + prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]); +} + +static void kill_prom_timer(void) +{ + if (!prom_timers) + return; + + /* Save them away for later. */ + prom_limit0 = prom_timers->limit0; + prom_limit1 = prom_timers->limit1; + + /* Just as in sun4c PROM uses timer which ticks at IRQ 14. + * We turn both off here just to be paranoid. + */ + prom_timers->limit0 = 0; + prom_timers->limit1 = 0; + + /* Wheee, eat the interrupt packet too... */ + __asm__ __volatile__( +" mov 0x40, %%g2\n" +" ldxa [%%g0] %0, %%g1\n" +" ldxa [%%g2] %1, %%g1\n" +" stxa %%g0, [%%g0] %0\n" +" membar #Sync\n" + : /* no outputs */ + : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R) + : "g1", "g2"); +} + +void notrace init_irqwork_curcpu(void) +{ + int cpu = hard_smp_processor_id(); + + trap_block[cpu].irq_worklist_pa = 0UL; +} + +/* Please be very careful with register_one_mondo() and + * sun4v_register_mondo_queues(). 
+ * + * On SMP this gets invoked from the CPU trampoline before + * the cpu has fully taken over the trap table from OBP, + * and it's kernel stack + %g6 thread register state is + * not fully cooked yet. + * + * Therefore you cannot make any OBP calls, not even prom_printf, + * from these two routines. + */ +static void notrace register_one_mondo(unsigned long paddr, unsigned long type, + unsigned long qmask) +{ + unsigned long num_entries = (qmask + 1) / 64; + unsigned long status; + + status = sun4v_cpu_qconf(type, paddr, num_entries); + if (status != HV_EOK) { + prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " + "err %lu\n", type, paddr, num_entries, status); + prom_halt(); + } +} + +void notrace sun4v_register_mondo_queues(int this_cpu) +{ + struct trap_per_cpu *tb = &trap_block[this_cpu]; + + register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, + tb->cpu_mondo_qmask); + register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, + tb->dev_mondo_qmask); + register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, + tb->resum_qmask); + register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, + tb->nonresum_qmask); +} + +/* Each queue region must be a power of 2 multiple of 64 bytes in + * size. The base real address must be aligned to the size of the + * region. Thus, an 8KB queue must be 8KB aligned, for example. + */ +static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) +{ + unsigned long size = PAGE_ALIGN(qmask + 1); + unsigned long order = get_order(size); + unsigned long p; + + p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!p) { + prom_printf("SUN4V: Error, cannot allocate queue.\n"); + prom_halt(); + } + + *pa_ptr = __pa(p); +} + +static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) +{ +#ifdef CONFIG_SMP + unsigned long page; + void *mondo, *p; + + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); + + /* Make sure mondo block is 64byte aligned */ + p = kzalloc(127, GFP_KERNEL); + if (!p) { + prom_printf("SUN4V: Error, cannot allocate mondo block.\n"); + prom_halt(); + } + mondo = (void *)(((unsigned long)p + 63) & ~0x3f); + tb->cpu_mondo_block_pa = __pa(mondo); + + page = get_zeroed_page(GFP_KERNEL); + if (!page) { + prom_printf("SUN4V: Error, cannot allocate cpu list page.\n"); + prom_halt(); + } + + tb->cpu_list_pa = __pa(page); +#endif +} + +/* Allocate mondo and error queues for all possible cpus. */ +static void __init sun4v_init_mondo_queues(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct trap_per_cpu *tb = &trap_block[cpu]; + + alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask); + alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask); + alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask); + alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask); + alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask); + alloc_one_queue(&tb->nonresum_kernel_buf_pa, + tb->nonresum_qmask); + } +} + +static void __init init_send_mondo_info(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct trap_per_cpu *tb = &trap_block[cpu]; + + init_cpu_send_mondo_info(tb); + } +} + +static struct irqaction timer_irq_action = { + .name = "timer", +}; + +static void __init irq_ivector_init(void) +{ + unsigned long size, order; + unsigned int ivecs; + + /* If we are doing cookie only VIRQs then we do not need the ivector + * table to process interrupts. 
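+ * (A cookie encodes the bucket's physical address directly, so there
+ * is no sysino-indexed table left to consult.)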
+ */ + if (sun4v_cookie_only_virqs()) + return; + + ivecs = size_nr_ivec(); + size = sizeof(struct ino_bucket) * ivecs; + order = get_order(size); + ivector_table = (struct ino_bucket *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!ivector_table) { + prom_printf("Fatal error, cannot allocate ivector_table\n"); + prom_halt(); + } + __flush_dcache_range((unsigned long) ivector_table, + ((unsigned long) ivector_table) + size); + + ivector_table_pa = __pa(ivector_table); +} + +/* Only invoked on boot processor.*/ +void __init init_IRQ(void) +{ + irq_init_hv(); + irq_ivector_init(); + map_prom_timers(); + kill_prom_timer(); + + if (tlb_type == hypervisor) + sun4v_init_mondo_queues(); + + init_send_mondo_info(); + + if (tlb_type == hypervisor) { + /* Load up the boot cpu's entries. */ + sun4v_register_mondo_queues(hard_smp_processor_id()); + } + + /* We need to clear any IRQ's pending in the soft interrupt + * registers, a spurious one could be left around from the + * PROM timer which we just disabled. + */ + clear_softint(get_softint()); + + /* Now that ivector table is initialized, it is safe + * to receive IRQ vector traps. We will normally take + * one or two right now, in case some device PROM used + * to boot us wants to speak to us. We just ignore them. + */ + __asm__ __volatile__("rdpr %%pstate, %%g1\n\t" + "or %%g1, %0, %%g1\n\t" + "wrpr %%g1, 0x0, %%pstate" + : /* No outputs */ + : "i" (PSTATE_IE) + : "g1"); + + irq_to_desc(0)->action = &timer_irq_action; +} diff --git a/arch/sparc/kernel/itlb_miss.S b/arch/sparc/kernel/itlb_miss.S new file mode 100644 index 000000000..5a5d92482 --- /dev/null +++ b/arch/sparc/kernel/itlb_miss.S @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* ITLB ** ICACHE line 1: Context 0 check and TSB load */ + ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer + ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET + srlx %g6, 48, %g5 ! Get context + sllx %g6, 22, %g6 ! Zero out context + brz,pn %g5, kvmap_itlb ! Context 0 processing + srlx %g6, 22, %g6 ! Delay slot + TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry + cmp %g4, %g6 ! Compare TAG + +/* ITLB ** ICACHE line 2: TSB compare and TLB load */ + bne,pn %xcc, tsb_miss_itlb ! Miss + mov FAULT_CODE_ITLB, %g3 + sethi %hi(_PAGE_EXEC_4U), %g4 + andcc %g5, %g4, %g0 ! Executable? + be,pn %xcc, tsb_do_fault + nop ! Delay slot, fill me + stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB + retry ! Trap done + +/* ITLB ** ICACHE line 3: */ + nop + nop + nop + nop + nop + nop + nop + nop + +/* ITLB ** ICACHE line 4: */ + nop + nop + nop + nop + nop + nop + nop + nop diff --git a/arch/sparc/kernel/ivec.S b/arch/sparc/kernel/ivec.S new file mode 100644 index 000000000..94ba2c3a2 --- /dev/null +++ b/arch/sparc/kernel/ivec.S @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* The registers for cross calls will be: + * + * DATA 0: [low 32-bits] Address of function to call, jmp to this + * [high 32-bits] MMU Context Argument 0, place in %g5 + * DATA 1: Address Argument 1, place in %g1 + * DATA 2: Address Argument 2, place in %g7 + * + * With this method we can do most of the cross-call tlb/cache + * flushing very quickly. 
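+ *
+ * do_ivec_xcall below unpacks the packet exactly this way: the low
+ * half of DATA 0 becomes the jmpl target in %g3, the high half lands
+ * in %g5, and DATA 1 and DATA 2 are loaded into %g1 and %g7.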
+ */ + .align 32 + .globl do_ivec + .type do_ivec,#function +do_ivec: + mov 0x40, %g3 + ldxa [%g3 + %g0] ASI_INTR_R, %g3 + sethi %hi(KERNBASE), %g4 + cmp %g3, %g4 + bgeu,pn %xcc, do_ivec_xcall + srlx %g3, 32, %g5 + stxa %g0, [%g0] ASI_INTR_RECEIVE + membar #Sync + + sethi %hi(ivector_table_pa), %g2 + ldx [%g2 + %lo(ivector_table_pa)], %g2 + sllx %g3, 4, %g3 + add %g2, %g3, %g3 + + TRAP_LOAD_IRQ_WORK_PA(%g6, %g1) + + ldx [%g6], %g5 + stxa %g5, [%g3] ASI_PHYS_USE_EC + stx %g3, [%g6] + wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint + retry +do_ivec_xcall: + mov 0x50, %g1 + ldxa [%g1 + %g0] ASI_INTR_R, %g1 + srl %g3, 0, %g3 + + mov 0x60, %g7 + ldxa [%g7 + %g0] ASI_INTR_R, %g7 + stxa %g0, [%g0] ASI_INTR_RECEIVE + membar #Sync + ba,pt %xcc, 1f + nop + + .align 32 +1: jmpl %g3, %g0 + nop + .size do_ivec,.-do_ivec diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c new file mode 100644 index 000000000..a4cfaeeca --- /dev/null +++ b/arch/sparc/kernel/jump_label.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include +#include + +#include + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *insn = (u32 *) (unsigned long) entry->code; + u32 val; + + if (type == JUMP_LABEL_JMP) { + s32 off = (s32)entry->target - (s32)entry->code; + bool use_v9_branch = false; + + BUG_ON(off & 3); + +#ifdef CONFIG_SPARC64 + if (off <= 0xfffff && off >= -0x100000) + use_v9_branch = true; +#endif + if (use_v9_branch) { + /* WDISP19 - target is . + immed << 2 */ + /* ba,pt %xcc, . + off */ + val = 0x10680000 | (((u32) off >> 2) & 0x7ffff); + } else { + /* WDISP22 - target is . + immed << 2 */ + BUG_ON(off > 0x7fffff); + BUG_ON(off < -0x800000); + /* ba . + off */ + val = 0x10800000 | (((u32) off >> 2) & 0x3fffff); + } + } else { + val = 0x01000000; + } + + mutex_lock(&text_mutex); + *insn = val; + flushi(insn); + mutex_unlock(&text_mutex); +} diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h new file mode 100644 index 000000000..9cd09a3ef --- /dev/null +++ b/arch/sparc/kernel/kernel.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SPARC_KERNEL_H +#define __SPARC_KERNEL_H + +#include +#include + +#include +#include +#include + +/* cpu.c */ +extern const char *sparc_pmu_type; +extern unsigned int fsr_storage; +extern int ncpus_probed; + +/* process{_32,_64}.c */ +asmlinkage long sparc_clone(struct pt_regs *regs); +asmlinkage long sparc_fork(struct pt_regs *regs); +asmlinkage long sparc_vfork(struct pt_regs *regs); + +#ifdef CONFIG_SPARC64 +/* setup_64.c */ +struct seq_file; +void cpucap_info(struct seq_file *); + +static inline unsigned long kimage_addr_to_ra(const void *p) +{ + unsigned long val = (unsigned long) p; + + return kern_base + (val - KERNBASE); +} + +/* sys_sparc_64.c */ +asmlinkage long sys_kern_features(void); + +/* unaligned_64.c */ +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); +int handle_popc(u32 insn, struct pt_regs *regs); +void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); +void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); + +/* smp_64.c */ +void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); +void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); +void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); +void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); + +/* 
kgdb_64.c */ +void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs); + +/* pci.c */ +#ifdef CONFIG_PCI +int ali_sound_dma_hack(struct device *dev, u64 device_mask); +#else +#define ali_sound_dma_hack(dev, mask) (0) +#endif + +/* signal32.c */ +void do_sigreturn32(struct pt_regs *regs); +asmlinkage void do_rt_sigreturn32(struct pt_regs *regs); +void do_signal32(struct pt_regs * regs); +asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); + +/* time_64.c */ +void __init time_init_early(void); + +/* compat_audit.c */ +extern unsigned int sparc32_dir_class[]; +extern unsigned int sparc32_chattr_class[]; +extern unsigned int sparc32_write_class[]; +extern unsigned int sparc32_read_class[]; +extern unsigned int sparc32_signal_class[]; +int sparc32_classify_syscall(unsigned int syscall); +#endif + +#ifdef CONFIG_SPARC32 +/* setup_32.c */ +struct linux_romvec; +void sparc32_start_kernel(struct linux_romvec *rp); + +/* cpu.c */ +void cpu_probe(void); + +/* traps_32.c */ +void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +/* irq_32.c */ +extern struct irqaction static_irqaction[]; +extern int static_irq_count; +extern spinlock_t irq_action_lock; + +void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs); +void init_IRQ(void); + +/* sun4m_irq.c */ +void sun4m_init_IRQ(void); +void sun4m_unmask_profile_irq(void); +void sun4m_clear_profile_irq(int cpu); + +/* sun4m_smp.c */ +void sun4m_cpu_pre_starting(void *arg); +void sun4m_cpu_pre_online(void *arg); +void __init smp4m_boot_cpus(void); +int smp4m_boot_one_cpu(int i, struct task_struct *idle); +void __init smp4m_smp_done(void); +void smp4m_cross_call_irq(void); +void smp4m_percpu_timer_interrupt(struct pt_regs *regs); + +/* sun4d_irq.c */ +extern spinlock_t sun4d_imsk_lock; + +void sun4d_init_IRQ(void); +int sun4d_request_irq(unsigned int irq, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, void *dev_id); +int show_sun4d_interrupts(struct seq_file *, void *); +void sun4d_distribute_irqs(void); +void sun4d_free_irq(unsigned int irq, void *dev_id); + +/* sun4d_smp.c */ +void sun4d_cpu_pre_starting(void *arg); +void sun4d_cpu_pre_online(void *arg); +void __init smp4d_boot_cpus(void); +int smp4d_boot_one_cpu(int i, struct task_struct *idle); +void __init smp4d_smp_done(void); +void smp4d_cross_call_irq(void); +void smp4d_percpu_timer_interrupt(struct pt_regs *regs); + +/* leon_smp.c */ +void leon_cpu_pre_starting(void *arg); +void leon_cpu_pre_online(void *arg); +void leonsmp_ipi_interrupt(void); +void leon_cross_call_irq(void); + +/* head_32.S */ +extern unsigned int t_nmi[]; +extern unsigned int linux_trap_ipi15_sun4d[]; +extern unsigned int linux_trap_ipi15_sun4m[]; + +extern struct tt_entry trapbase; +extern struct tt_entry trapbase_cpu1; +extern struct tt_entry trapbase_cpu2; +extern struct tt_entry trapbase_cpu3; + +extern char cputypval[]; + +/* entry.S */ +extern unsigned long lvl14_save[4]; +extern unsigned int real_irq_entry[]; +extern unsigned int smp4d_ticker[]; +extern unsigned int patchme_maybe_smp_msg[]; + +void floppy_hardint(void); + +/* trampoline_32.S */ +extern unsigned long sun4m_cpu_startup; +extern unsigned long sun4d_cpu_startup; + +/* signal_32.c */ +asmlinkage void do_sigreturn(struct pt_regs *regs); +asmlinkage void do_rt_sigreturn(struct pt_regs *regs); +void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, + unsigned long thread_info_flags); +asmlinkage int do_sys_sigstack(struct 
sigstack __user *ssptr, + struct sigstack __user *ossptr, + unsigned long sp); + +/* ptrace_32.c */ +asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p); + +/* unaligned_32.c */ +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); +asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn); + +/* windows.c */ +void try_to_clear_window_buffer(struct pt_regs *regs, int who); + +/* auxio_32.c */ +void __init auxio_probe(void); +void __init auxio_power_probe(void); + +/* pcic.c */ +extern void __iomem *pcic_regs; +void pcic_nmi(unsigned int pend, struct pt_regs *regs); + +/* time_32.c */ +void __init time_init(void); + +#else /* CONFIG_SPARC32 */ +#endif /* CONFIG_SPARC32 */ +#endif /* !(__SPARC_KERNEL_H) */ diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c new file mode 100644 index 000000000..58ad3f7de --- /dev/null +++ b/arch/sparc/kernel/kgdb_32.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* kgdb.c: KGDB support for 32-bit sparc. + * + * Copyright (C) 2008 David S. Miller + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "kernel.h" +#include "entry.h" + +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct reg_window32 *win; + int i; + + gdb_regs[GDB_G0] = 0; + for (i = 0; i < 15; i++) + gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i]; + + win = (struct reg_window32 *) regs->u_regs[UREG_FP]; + for (i = 0; i < 8; i++) + gdb_regs[GDB_L0 + i] = win->locals[i]; + for (i = 0; i < 8; i++) + gdb_regs[GDB_I0 + i] = win->ins[i]; + + for (i = GDB_F0; i <= GDB_F31; i++) + gdb_regs[i] = 0; + + gdb_regs[GDB_Y] = regs->y; + gdb_regs[GDB_PSR] = regs->psr; + gdb_regs[GDB_WIM] = 0; + gdb_regs[GDB_TBR] = (unsigned long) &trapbase; + gdb_regs[GDB_PC] = regs->pc; + gdb_regs[GDB_NPC] = regs->npc; + gdb_regs[GDB_FSR] = 0; + gdb_regs[GDB_CSR] = 0; +} + +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + struct thread_info *t = task_thread_info(p); + struct reg_window32 *win; + int i; + + for (i = GDB_G0; i < GDB_G6; i++) + gdb_regs[i] = 0; + gdb_regs[GDB_G6] = (unsigned long) t; + gdb_regs[GDB_G7] = 0; + for (i = GDB_O0; i < GDB_SP; i++) + gdb_regs[i] = 0; + gdb_regs[GDB_SP] = t->ksp; + gdb_regs[GDB_O7] = 0; + + win = (struct reg_window32 *) t->ksp; + for (i = 0; i < 8; i++) + gdb_regs[GDB_L0 + i] = win->locals[i]; + for (i = 0; i < 8; i++) + gdb_regs[GDB_I0 + i] = win->ins[i]; + + for (i = GDB_F0; i <= GDB_F31; i++) + gdb_regs[i] = 0; + + gdb_regs[GDB_Y] = 0; + + gdb_regs[GDB_PSR] = t->kpsr; + gdb_regs[GDB_WIM] = t->kwim; + gdb_regs[GDB_TBR] = (unsigned long) &trapbase; + gdb_regs[GDB_PC] = t->kpc; + gdb_regs[GDB_NPC] = t->kpc + 4; + gdb_regs[GDB_FSR] = 0; + gdb_regs[GDB_CSR] = 0; +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct reg_window32 *win; + int i; + + for (i = 0; i < 15; i++) + regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i]; + + /* If the PSR register is changing, we have to preserve + * the CWP field, otherwise window save/restore explodes. 
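+ * In pseudo-C (illustrative names), the splice performed below is:
+ *
+ *	new_psr = (gdb_psr & ~PSR_CWP) | (current_psr & PSR_CWP);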
+ */ + if (regs->psr != gdb_regs[GDB_PSR]) { + unsigned long cwp = regs->psr & PSR_CWP; + + regs->psr = (gdb_regs[GDB_PSR] & ~PSR_CWP) | cwp; + } + + regs->pc = gdb_regs[GDB_PC]; + regs->npc = gdb_regs[GDB_NPC]; + regs->y = gdb_regs[GDB_Y]; + + win = (struct reg_window32 *) regs->u_regs[UREG_FP]; + for (i = 0; i < 8; i++) + win->locals[i] = gdb_regs[GDB_L0 + i]; + for (i = 0; i < 8; i++) + win->ins[i] = gdb_regs[GDB_I0 + i]; +} + +int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + char *remcomInBuffer, char *remcomOutBuffer, + struct pt_regs *linux_regs) +{ + unsigned long addr; + char *ptr; + + switch (remcomInBuffer[0]) { + case 'c': + /* try to read optional parameter, pc unchanged if no parm */ + ptr = &remcomInBuffer[1]; + if (kgdb_hex2long(&ptr, &addr)) { + linux_regs->pc = addr; + linux_regs->npc = addr + 4; + } + fallthrough; + + case 'D': + case 'k': + if (linux_regs->pc == (unsigned long) arch_kgdb_breakpoint) { + linux_regs->pc = linux_regs->npc; + linux_regs->npc += 4; + } + return 0; + } + return -1; +} + +asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) +{ + unsigned long flags; + + if (user_mode(regs)) { + do_hw_interrupt(regs, trap_level); + return; + } + + flushw_all(); + + local_irq_save(flags); + kgdb_handle_exception(trap_level, SIGTRAP, 0, regs); + local_irq_restore(flags); +} + +int kgdb_arch_init(void) +{ + return 0; +} + +void kgdb_arch_exit(void) +{ +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->pc = ip; + regs->npc = regs->pc + 4; +} + +const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: ta 0x7d */ + .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d }, +}; diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c new file mode 100644 index 000000000..177746ae2 --- /dev/null +++ b/arch/sparc/kernel/kgdb_64.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* kgdb.c: KGDB support for 64-bit sparc. + * + * Copyright (C) 2008 David S. 
Miller + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "kernel.h" + +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct reg_window *win; + int i; + + gdb_regs[GDB_G0] = 0; + for (i = 0; i < 15; i++) + gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i]; + + win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); + for (i = 0; i < 8; i++) + gdb_regs[GDB_L0 + i] = win->locals[i]; + for (i = 0; i < 8; i++) + gdb_regs[GDB_I0 + i] = win->ins[i]; + + for (i = GDB_F0; i <= GDB_F62; i++) + gdb_regs[i] = 0; + + gdb_regs[GDB_PC] = regs->tpc; + gdb_regs[GDB_NPC] = regs->tnpc; + gdb_regs[GDB_STATE] = regs->tstate; + gdb_regs[GDB_FSR] = 0; + gdb_regs[GDB_FPRS] = 0; + gdb_regs[GDB_Y] = regs->y; +} + +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + struct thread_info *t = task_thread_info(p); + extern unsigned int switch_to_pc; + extern unsigned int ret_from_fork; + struct reg_window *win; + unsigned long pc, cwp; + int i; + + for (i = GDB_G0; i < GDB_G6; i++) + gdb_regs[i] = 0; + gdb_regs[GDB_G6] = (unsigned long) t; + gdb_regs[GDB_G7] = (unsigned long) p; + for (i = GDB_O0; i < GDB_SP; i++) + gdb_regs[i] = 0; + gdb_regs[GDB_SP] = t->ksp; + gdb_regs[GDB_O7] = 0; + + win = (struct reg_window *) (t->ksp + STACK_BIAS); + for (i = 0; i < 8; i++) + gdb_regs[GDB_L0 + i] = win->locals[i]; + for (i = 0; i < 8; i++) + gdb_regs[GDB_I0 + i] = win->ins[i]; + + for (i = GDB_F0; i <= GDB_F62; i++) + gdb_regs[i] = 0; + + if (t->new_child) + pc = (unsigned long) &ret_from_fork; + else + pc = (unsigned long) &switch_to_pc; + + gdb_regs[GDB_PC] = pc; + gdb_regs[GDB_NPC] = pc + 4; + + cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP]; + + gdb_regs[GDB_STATE] = (TSTATE_PRIV | TSTATE_IE | cwp); + gdb_regs[GDB_FSR] = 0; + gdb_regs[GDB_FPRS] = 0; + gdb_regs[GDB_Y] = 0; +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct reg_window *win; + int i; + + for (i = 0; i < 15; i++) + regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i]; + + /* If the TSTATE register is changing, we have to preserve + * the CWP field, otherwise window save/restore explodes. 
+ */ + if (regs->tstate != gdb_regs[GDB_STATE]) { + unsigned long cwp = regs->tstate & TSTATE_CWP; + + regs->tstate = (gdb_regs[GDB_STATE] & ~TSTATE_CWP) | cwp; + } + + regs->tpc = gdb_regs[GDB_PC]; + regs->tnpc = gdb_regs[GDB_NPC]; + regs->y = gdb_regs[GDB_Y]; + + win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); + for (i = 0; i < 8; i++) + win->locals[i] = gdb_regs[GDB_L0 + i]; + for (i = 0; i < 8; i++) + win->ins[i] = gdb_regs[GDB_I0 + i]; +} + +#ifdef CONFIG_SMP +void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs) +{ + unsigned long flags; + + __asm__ __volatile__("rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (flags) + : "i" (PSTATE_IE)); + + flushw_all(); + + if (atomic_read(&kgdb_active) != -1) + kgdb_nmicallback(raw_smp_processor_id(), regs); + + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (flags)); +} +#endif + +int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + char *remcomInBuffer, char *remcomOutBuffer, + struct pt_regs *linux_regs) +{ + unsigned long addr; + char *ptr; + + switch (remcomInBuffer[0]) { + case 'c': + /* try to read optional parameter, pc unchanged if no parm */ + ptr = &remcomInBuffer[1]; + if (kgdb_hex2long(&ptr, &addr)) { + linux_regs->tpc = addr; + linux_regs->tnpc = addr + 4; + } + fallthrough; + + case 'D': + case 'k': + if (linux_regs->tpc == (unsigned long) arch_kgdb_breakpoint) { + linux_regs->tpc = linux_regs->tnpc; + linux_regs->tnpc += 4; + } + return 0; + } + return -1; +} + +asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long flags; + + if (user_mode(regs)) { + bad_trap(regs, trap_level); + goto out; + } + + flushw_all(); + + local_irq_save(flags); + kgdb_handle_exception(0x172, SIGTRAP, 0, regs); + local_irq_restore(flags); +out: + exception_exit(prev_state); +} + +int kgdb_arch_init(void) +{ + return 0; +} + +void kgdb_arch_exit(void) +{ +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->tpc = ip; + regs->tnpc = regs->tpc + 4; +} + +const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: ta 0x72 */ + .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 }, +}; diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c new file mode 100644 index 000000000..535c7b35c --- /dev/null +++ b/arch/sparc/kernel/kprobes.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0 +/* arch/sparc64/kernel/kprobes.c + * + * Copyright (C) 2004 David S. Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* We do not have hardware single-stepping on sparc64. + * So we implement software single-stepping with breakpoint + * traps. The top-level scheme is similar to that used + * in the x86 kprobes implementation. + * + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break instruction at + * index one. + * + * When we hit a kprobe we: + * - Run the pre-handler + * - Remember "regs->tnpc" and interrupt level stored in + * "regs->tstate" so we can restore them later + * - Disable PIL interrupts + * - Set regs->tpc to point to kprobe->ainsn.insn[0] + * - Set regs->tnpc to point to kprobe->ainsn.insn[1] + * - Mark that we are actively in a kprobe + * + * At this point we wait for the second breakpoint at + * kprobe->ainsn.insn[1] to hit. 
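+ * (That is, the probed instruction executes out of line from the insn[] copy while the breakpoint stays armed at p->addr.)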
When it does we: + - Run the post-handler + - Set regs->tpc to "remembered" regs->tnpc stored above, + restore the PIL interrupt level in "regs->tstate" as well + - Make any adjustments necessary to regs->tnpc in order + to handle relative branches correctly. See below. + - Mark that we are no longer actively in a kprobe. + */ + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + if ((unsigned long) p->addr & 0x3UL) + return -EILSEQ; + + p->ainsn.insn[0] = *p->addr; + flushi(&p->ainsn.insn[0]); + + p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; + flushi(&p->ainsn.insn[1]); + + p->opcode = *p->addr; + return 0; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + *p->addr = BREAKPOINT_INSTRUCTION; + flushi(p->addr); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + *p->addr = p->opcode; + flushi(p->addr); +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; + kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc; + kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc; + kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil; +} + +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, p); + kcb->kprobe_orig_tnpc = regs->tnpc; + kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); +} + +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + regs->tstate |= TSTATE_PIL; + + /* single step inline, if it is a breakpoint instruction */ + if (p->opcode == BREAKPOINT_INSTRUCTION) { + regs->tpc = (unsigned long) p->addr; + regs->tnpc = kcb->kprobe_orig_tnpc; + } else { + regs->tpc = (unsigned long) &p->ainsn.insn[0]; + regs->tnpc = (unsigned long) &p->ainsn.insn[1]; + } +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + void *addr = (void *) regs->tpc; + int ret = 0; + struct kprobe_ctlblk *kcb; + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS) { + regs->tstate = ((regs->tstate & ~TSTATE_PIL) | + kcb->kprobe_orig_tstate_pil); + goto no_kprobe; + } + /* We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * Here we save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers.
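+ * The state saved here is popped again by post_kprobe_handler() once the nested probe's single step completes.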
+ */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + kcb->kprobe_status = KPROBE_REENTER; + prepare_singlestep(p, regs, kcb); + return 1; + } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { + /* The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + */ + ret = 1; + } + /* Not one of ours: let kernel handle it */ + goto no_kprobe; + } + + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (p->pre_handler && p->pre_handler(p, regs)) { + reset_current_kprobe(); + preempt_enable_no_resched(); + return 1; + } + + prepare_singlestep(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + +no_kprobe: + preempt_enable_no_resched(); + return ret; +} + +/* If INSN is a relative control transfer instruction, + * return the corrected branch destination value. + * + * regs->tpc and regs->tnpc still hold the values of the + * program counters at the time of trap due to the execution + * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1] + * + */ +static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long real_pc = (unsigned long) p->addr; + + /* Branch not taken, no mods necessary. */ + if (regs->tnpc == regs->tpc + 0x4UL) + return real_pc + 0x8UL; + + /* The three cases are call, branch w/prediction, + * and traditional branch. + */ + if ((insn & 0xc0000000) == 0x40000000 || + (insn & 0xc1c00000) == 0x00400000 || + (insn & 0xc1c00000) == 0x00800000) { + unsigned long ainsn_addr; + + ainsn_addr = (unsigned long) &p->ainsn.insn[0]; + + /* The instruction did all the work for us + * already, just apply the offset to the correct + * instruction location. + */ + return (real_pc + (regs->tnpc - ainsn_addr)); + } + + /* It is jmpl or some other absolute PC modification instruction, + * leave NPC as-is. + */ + return regs->tnpc; +} + +/* If INSN is an instruction which writes its PC location + * into a destination register, fix that up. + */ +static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn, + unsigned long real_pc) +{ + unsigned long *slot = NULL; + + /* Simplest case is 'call', which always uses %o7 */ + if ((insn & 0xc0000000) == 0x40000000) { + slot = &regs->u_regs[UREG_I7]; + } + + /* 'jmpl' encodes the register inside of the opcode */ + if ((insn & 0xc1f80000) == 0x81c00000) { + unsigned long rd = ((insn >> 25) & 0x1f); + + if (rd <= 15) { + slot = &regs->u_regs[rd]; + } else { + /* Hard case, it goes onto the stack. */ + flushw_all(); + + rd -= 16; + slot = (unsigned long *) + (regs->u_regs[UREG_FP] + STACK_BIAS); + slot += rd; + } + } + if (slot != NULL) + *slot = real_pc; +} + +/* + * Called after single-stepping. p->addr is the address of the + * instruction which has been replaced by the breakpoint + * instruction. To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is &p->ainsn.insn[0].
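+ * Because that copy ran at a different address than p->addr, relbranch_fixup() and retpc_fixup() above are used to correct any PC-relative state the stepped instruction produced.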
+ * + * This function prepares to return from the post-single-step + * breakpoint trap. + */ +static void __kprobes resume_execution(struct kprobe *p, + struct pt_regs *regs, struct kprobe_ctlblk *kcb) +{ + u32 insn = p->ainsn.insn[0]; + + regs->tnpc = relbranch_fixup(insn, p, regs); + + /* This assignment must occur after relbranch_fixup() */ + regs->tpc = kcb->kprobe_orig_tnpc; + + retpc_fixup(regs, insn, (unsigned long) p->addr); + + regs->tstate = ((regs->tstate & ~TSTATE_PIL) | + kcb->kprobe_orig_tstate_pil); +} + +static int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + resume_execution(cur, regs, kcb); + + /* Restore the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *entry; + + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the tpc points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->tpc = (unsigned long)cur->addr; + regs->tnpc = kcb->kprobe_orig_tnpc; + regs->tstate = ((regs->tstate & ~TSTATE_PIL) | + kcb->kprobe_orig_tstate_pil); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + + entry = search_exception_tables(regs->tpc); + if (entry) { + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + return 1; + } + + /* + * fixup_exception() could not handle it, + * let do_page_fault() fix it. + */ + break; + default: + break; + } + + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + if (args->regs && user_mode(args->regs)) + return ret; + + switch (val) { + case DIE_DEBUG: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_DEBUG_2: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} + +asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, + struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + BUG_ON(trap_level != 0x170 && trap_level != 0x171); + + if (user_mode(regs)) { + local_irq_enable(); + bad_trap(regs, trap_level); + goto out; + } + + /* trap_level == 0x170 --> ta 0x70 + * trap_level == 0x171 --> ta 0x71 + */ + if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2, + (trap_level == 0x170) ?
"debug" : "debug_2", + regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) + bad_trap(regs, trap_level); +out: + exception_exit(prev_state); +} + +/* The value stored in the return address register is actually 2 + * instructions before where the callee will return to. + * Sequences usually look something like this + * + * call some_function <--- return register points here + * nop <--- call delay slot + * whatever <--- where callee returns to + * + * To keep trampoline_probe_handler logic simpler, we normalize the + * value kept in ri->ret_addr so we don't need to keep adjusting it + * back and forth. + */ +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8); + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->u_regs[UREG_RETPC] = + ((unsigned long)__kretprobe_trampoline) - 8; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long orig_ret_address = 0; + + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); + regs->tpc = orig_ret_address; + regs->tnpc = orig_ret_address + 4; + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile(".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n" + "\tnop\n" + "\tnop\n"); +} +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) + return 1; + + return 0; +} diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h new file mode 100644 index 000000000..b3c5e8f24 --- /dev/null +++ b/arch/sparc/kernel/kstack.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KSTACK_H +#define _KSTACK_H + +#include +#include +#include +#include + +/* SP must be STACK_BIAS adjusted already. */ +static inline bool kstack_valid(struct thread_info *tp, unsigned long sp) +{ + unsigned long base = (unsigned long) tp; + + /* Stack pointer must be 16-byte aligned. */ + if (sp & (16UL - 1)) + return false; + + if (sp >= (base + sizeof(struct thread_info)) && + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) + return true; + + if (hardirq_stack[tp->cpu]) { + base = (unsigned long) hardirq_stack[tp->cpu]; + if (sp >= base && + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) + return true; + base = (unsigned long) softirq_stack[tp->cpu]; + if (sp >= base && + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) + return true; + } + return false; +} + +/* Does "regs" point to a valid pt_regs trap frame? 
*/ +static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs) +{ + unsigned long base = (unsigned long) tp; + unsigned long addr = (unsigned long) regs; + + if (addr >= base && + addr <= (base + THREAD_SIZE - sizeof(*regs))) + goto check_magic; + + if (hardirq_stack[tp->cpu]) { + base = (unsigned long) hardirq_stack[tp->cpu]; + if (addr >= base && + addr <= (base + THREAD_SIZE - sizeof(*regs))) + goto check_magic; + base = (unsigned long) softirq_stack[tp->cpu]; + if (addr >= base && + addr <= (base + THREAD_SIZE - sizeof(*regs))) + goto check_magic; + } + return false; + +check_magic: + if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) + return true; + return false; + +} + +static inline __attribute__((always_inline)) void *set_hardirq_stack(void) +{ + void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; + + __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp)); + if (orig_sp < sp || + orig_sp > (sp + THREAD_SIZE)) { + sp += THREAD_SIZE - 192 - STACK_BIAS; + __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); + } + + return orig_sp; +} + +static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) +{ + __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); +} + +#endif /* _KSTACK_H */ diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S new file mode 100644 index 000000000..6bfaf73ce --- /dev/null +++ b/arch/sparc/kernel/ktlb.S @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling. + * + * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller + * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include + + .text + .align 32 + +kvmap_itlb: + /* g6: TAG TARGET */ + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_IMMU, %g4 + + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g4 here. + */ + + /* sun4v_itlb_miss branches here with the missing virtual + * address already loaded into %g4 + */ +kvmap_itlb_4v: + + /* Catch kernel NULL pointer calls. */ + sethi %hi(PAGE_SIZE), %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_itlb_longpath + nop + + KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) + +kvmap_itlb_tsb_miss: + sethi %hi(LOW_OBP_ADDRESS), %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_itlb_vmalloc_addr + mov 0x1, %g5 + sllx %g5, 32, %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_itlb_obp + nop + +kvmap_itlb_vmalloc_addr: + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) + + /* fallthrough to TLB load */ + +kvmap_itlb_load: + +661: stxa %g5, [%g0] ASI_ITLB_DATA_IN + retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_ITLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. 
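+ * (The mov below sits in the branch delay slot, so it executes before control reaches sun4v_itlb_load.)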
+ */ + ba,pt %xcc, sun4v_itlb_load + mov %g5, %g3 + +kvmap_itlb_longpath: + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + SET_GL(1) + nop + .previous + + rdpr %tpc, %g5 + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_ITLB, %g4 + +kvmap_itlb_obp: + OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g7) + + TSB_WRITE(%g1, %g5, %g6) + + ba,pt %xcc, kvmap_itlb_load + nop + +kvmap_dtlb_obp: + OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g7) + + TSB_WRITE(%g1, %g5, %g6) + + ba,pt %xcc, kvmap_dtlb_load + nop + +kvmap_linear_early: + sethi %hi(kern_linear_pte_xor), %g7 + ldx [%g7 + %lo(kern_linear_pte_xor)], %g2 + ba,pt %xcc, kvmap_dtlb_tsb4m_load + xor %g2, %g4, %g5 + + .align 32 +kvmap_dtlb_tsb4m_load: + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) + ba,pt %xcc, kvmap_dtlb_load + nop + +kvmap_dtlb: + /* %g6: TAG TARGET */ + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g4 + + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g4 here. + */ + + /* sun4v_dtlb_miss branches here with the missing virtual + * address already loaded into %g4 + */ +kvmap_dtlb_4v: + brgez,pn %g4, kvmap_dtlb_nonlinear + nop + +#ifdef CONFIG_DEBUG_PAGEALLOC + /* Index through the base page size TSB even for linear + * mappings when using page allocation debugging. + */ + KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) +#else + /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ + KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) +#endif + /* Linear mapping TSB lookup failed. Fallthrough to kernel + * page table based lookup. + */ + .globl kvmap_linear_patch +kvmap_linear_patch: + ba,a,pt %xcc, kvmap_linear_early + +kvmap_dtlb_vmalloc_addr: + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) + + /* fallthrough to TLB load */ + +kvmap_dtlb_load: + +661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB + retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_DTLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_dtlb_load + mov %g5, %g3 + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +kvmap_vmemmap: + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) + ba,a,pt %xcc, kvmap_dtlb_load +#endif + +kvmap_dtlb_nonlinear: + /* Catch kernel NULL pointer derefs. */ + sethi %hi(PAGE_SIZE), %g5 + cmp %g4, %g5 + bleu,pn %xcc, kvmap_dtlb_longpath + nop + +#ifdef CONFIG_SPARSEMEM_VMEMMAP + /* Do not use the TSB for vmemmap. 
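+ * Addresses at or above VMEMMAP_BASE are instead resolved by the kernel page table walk at kvmap_vmemmap.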
*/ + sethi %hi(VMEMMAP_BASE), %g5 + ldx [%g5 + %lo(VMEMMAP_BASE)], %g5 + cmp %g4,%g5 + bgeu,pn %xcc, kvmap_vmemmap + nop +#endif + + KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) + +kvmap_dtlb_tsbmiss: + sethi %hi(MODULES_VADDR), %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_dtlb_longpath + sethi %hi(VMALLOC_END), %g5 + ldx [%g5 + %lo(VMALLOC_END)], %g5 + cmp %g4, %g5 + bgeu,pn %xcc, kvmap_dtlb_longpath + nop + +kvmap_check_obp: + sethi %hi(LOW_OBP_ADDRESS), %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_dtlb_vmalloc_addr + mov 0x1, %g5 + sllx %g5, 32, %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_dtlb_obp + nop + ba,pt %xcc, kvmap_dtlb_vmalloc_addr + nop + +kvmap_dtlb_longpath: + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g5 + .previous + + rdpr %tl, %g3 + cmp %g3, 1 + +661: mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g5 + .section .sun4v_2insn_patch, "ax" + .word 661b + ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 + nop + .previous + + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g5 here. + */ + + be,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB, %g4 + ba,pt %xcc, winfix_trampoline + nop diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c new file mode 100644 index 000000000..c0fa3ef6c --- /dev/null +++ b/arch/sparc/kernel/ldc.c @@ -0,0 +1,2428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ldc.c: Logical Domain Channel link-layer protocol driver. + * + * Copyright (C) 2007, 2008 David S. Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define DRV_MODULE_NAME "ldc" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "July 22, 2008" + +#define COOKIE_PGSZ_CODE 0xf000000000000000ULL +#define COOKIE_PGSZ_CODE_SHIFT 60ULL + + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +/* Packet header layout for unreliable and reliable mode frames. + * When in RAW mode, packets are simply straight 64-byte payloads + * with no headers. + */ +struct ldc_packet { + u8 type; +#define LDC_CTRL 0x01 +#define LDC_DATA 0x02 +#define LDC_ERR 0x10 + + u8 stype; +#define LDC_INFO 0x01 +#define LDC_ACK 0x02 +#define LDC_NACK 0x04 + + u8 ctrl; +#define LDC_VERS 0x01 /* Link Version */ +#define LDC_RTS 0x02 /* Request To Send */ +#define LDC_RTR 0x03 /* Ready To Receive */ +#define LDC_RDX 0x04 /* Ready for Data eXchange */ +#define LDC_CTRL_MSK 0x0f + + u8 env; +#define LDC_LEN 0x3f +#define LDC_FRAG_MASK 0xc0 +#define LDC_START 0x40 +#define LDC_STOP 0x80 + + u32 seqid; + + union { + u8 u_data[LDC_PACKET_SIZE - 8]; + struct { + u32 pad; + u32 ackid; + u8 r_data[LDC_PACKET_SIZE - 8 - 8]; + } r; + } u; +}; + +struct ldc_version { + u16 major; + u16 minor; +}; + +/* Ordered from largest major to lowest. 
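+ * find_by_major() depends on this ordering: it returns the first entry whose major is <= the requested major.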
*/ +static struct ldc_version ver_arr[] = { + { .major = 1, .minor = 0 }, +}; + +#define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE) +#define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE) + +struct ldc_channel; + +struct ldc_mode_ops { + int (*write)(struct ldc_channel *, const void *, unsigned int); + int (*read)(struct ldc_channel *, void *, unsigned int); +}; + +static const struct ldc_mode_ops raw_ops; +static const struct ldc_mode_ops nonraw_ops; +static const struct ldc_mode_ops stream_ops; + +int ldom_domaining_enabled; + +struct ldc_iommu { + /* Protects ldc_unmap. */ + spinlock_t lock; + struct ldc_mtable_entry *page_table; + struct iommu_map_table iommu_map_table; +}; + +struct ldc_channel { + /* Protects all operations that depend upon channel state. */ + spinlock_t lock; + + unsigned long id; + + u8 *mssbuf; + u32 mssbuf_len; + u32 mssbuf_off; + + struct ldc_packet *tx_base; + unsigned long tx_head; + unsigned long tx_tail; + unsigned long tx_num_entries; + unsigned long tx_ra; + + unsigned long tx_acked; + + struct ldc_packet *rx_base; + unsigned long rx_head; + unsigned long rx_tail; + unsigned long rx_num_entries; + unsigned long rx_ra; + + u32 rcv_nxt; + u32 snd_nxt; + + unsigned long chan_state; + + struct ldc_channel_config cfg; + void *event_arg; + + const struct ldc_mode_ops *mops; + + struct ldc_iommu iommu; + + struct ldc_version ver; + + u8 hs_state; +#define LDC_HS_CLOSED 0x00 +#define LDC_HS_OPEN 0x01 +#define LDC_HS_GOTVERS 0x02 +#define LDC_HS_SENTRTR 0x03 +#define LDC_HS_GOTRTR 0x04 +#define LDC_HS_COMPLETE 0x10 + + u8 flags; +#define LDC_FLAG_ALLOCED_QUEUES 0x01 +#define LDC_FLAG_REGISTERED_QUEUES 0x02 +#define LDC_FLAG_REGISTERED_IRQS 0x04 +#define LDC_FLAG_RESET 0x10 + + u8 mss; + u8 state; + +#define LDC_IRQ_NAME_MAX 32 + char rx_irq_name[LDC_IRQ_NAME_MAX]; + char tx_irq_name[LDC_IRQ_NAME_MAX]; + + struct hlist_head mh_list; + + struct hlist_node list; +}; + +#define ldcdbg(TYPE, f, a...) \ +do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \ + printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \ +} while (0) + +#define LDC_ABORT(lp) ldc_abort((lp), __func__) + +static const char *state_to_str(u8 state) +{ + switch (state) { + case LDC_STATE_INVALID: + return "INVALID"; + case LDC_STATE_INIT: + return "INIT"; + case LDC_STATE_BOUND: + return "BOUND"; + case LDC_STATE_READY: + return "READY"; + case LDC_STATE_CONNECTED: + return "CONNECTED"; + default: + return ""; + } +} + +static unsigned long __advance(unsigned long off, unsigned long num_entries) +{ + off += LDC_PACKET_SIZE; + if (off == (num_entries * LDC_PACKET_SIZE)) + off = 0; + + return off; +} + +static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off) +{ + return __advance(off, lp->rx_num_entries); +} + +static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off) +{ + return __advance(off, lp->tx_num_entries); +} + +static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp, + unsigned long *new_tail) +{ + struct ldc_packet *p; + unsigned long t; + + t = tx_advance(lp, lp->tx_tail); + if (t == lp->tx_head) + return NULL; + + *new_tail = t; + + p = lp->tx_base; + return p + (lp->tx_tail / LDC_PACKET_SIZE); +} + +/* When we are in reliable or stream mode, have to track the next packet + * we haven't gotten an ACK for in the TX queue using tx_acked. We have + * to be careful not to stomp over the queue past that point. 
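+ * (Packets between tx_acked and tx_tail have not been acknowledged by the peer yet, so their slots cannot be reused.)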
During + * the handshake, we don't have TX data packets pending in the queue + * and that's why handshake_get_tx_packet() need not be mindful of + * lp->tx_acked. + */ +static unsigned long head_for_data(struct ldc_channel *lp) +{ + if (lp->cfg.mode == LDC_MODE_STREAM) + return lp->tx_acked; + return lp->tx_head; +} + +static int tx_has_space_for(struct ldc_channel *lp, unsigned int size) +{ + unsigned long limit, tail, new_tail, diff; + unsigned int mss; + + limit = head_for_data(lp); + tail = lp->tx_tail; + new_tail = tx_advance(lp, tail); + if (new_tail == limit) + return 0; + + if (limit > new_tail) + diff = limit - new_tail; + else + diff = (limit + + ((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail)); + diff /= LDC_PACKET_SIZE; + mss = lp->mss; + + if (diff * mss < size) + return 0; + + return 1; +} + +static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp, + unsigned long *new_tail) +{ + struct ldc_packet *p; + unsigned long h, t; + + h = head_for_data(lp); + t = tx_advance(lp, lp->tx_tail); + if (t == h) + return NULL; + + *new_tail = t; + + p = lp->tx_base; + return p + (lp->tx_tail / LDC_PACKET_SIZE); +} + +static int set_tx_tail(struct ldc_channel *lp, unsigned long tail) +{ + unsigned long orig_tail = lp->tx_tail; + int limit = 1000; + + lp->tx_tail = tail; + while (limit-- > 0) { + unsigned long err; + + err = sun4v_ldc_tx_set_qtail(lp->id, tail); + if (!err) + return 0; + + if (err != HV_EWOULDBLOCK) { + lp->tx_tail = orig_tail; + return -EINVAL; + } + udelay(1); + } + + lp->tx_tail = orig_tail; + return -EBUSY; +} + +/* This just updates the head value in the hypervisor using + * a polling loop with a timeout. The caller takes care of + * updating software state representing the head change, if any. + */ +static int __set_rx_head(struct ldc_channel *lp, unsigned long head) +{ + int limit = 1000; + + while (limit-- > 0) { + unsigned long err; + + err = sun4v_ldc_rx_set_qhead(lp->id, head); + if (!err) + return 0; + + if (err != HV_EWOULDBLOCK) + return -EINVAL; + + udelay(1); + } + + return -EBUSY; +} + +static int send_tx_packet(struct ldc_channel *lp, + struct ldc_packet *p, + unsigned long new_tail) +{ + BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE))); + + return set_tx_tail(lp, new_tail); +} + +static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp, + u8 stype, u8 ctrl, + void *data, int dlen, + unsigned long *new_tail) +{ + struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail); + + if (p) { + memset(p, 0, sizeof(*p)); + p->type = LDC_CTRL; + p->stype = stype; + p->ctrl = ctrl; + if (data) + memcpy(p->u.u_data, data, dlen); + } + return p; +} + +static int start_handshake(struct ldc_channel *lp) +{ + struct ldc_packet *p; + struct ldc_version *ver; + unsigned long new_tail; + + ver = &ver_arr[0]; + + ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n", + ver->major, ver->minor); + + p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS, + ver, sizeof(*ver), &new_tail); + if (p) { + int err = send_tx_packet(lp, p, new_tail); + if (!err) + lp->flags &= ~LDC_FLAG_RESET; + return err; + } + return -EBUSY; +} + +static int send_version_nack(struct ldc_channel *lp, + u16 major, u16 minor) +{ + struct ldc_packet *p; + struct ldc_version ver; + unsigned long new_tail; + + ver.major = major; + ver.minor = minor; + + p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS, + &ver, sizeof(ver), &new_tail); + if (p) { + ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n", + ver.major, ver.minor); + + return send_tx_packet(lp, p, new_tail); + } + return
-EBUSY; +} + +static int send_version_ack(struct ldc_channel *lp, + struct ldc_version *vp) +{ + struct ldc_packet *p; + unsigned long new_tail; + + p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS, + vp, sizeof(*vp), &new_tail); + if (p) { + ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n", + vp->major, vp->minor); + + return send_tx_packet(lp, p, new_tail); + } + return -EBUSY; +} + +static int send_rts(struct ldc_channel *lp) +{ + struct ldc_packet *p; + unsigned long new_tail; + + p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0, + &new_tail); + if (p) { + p->env = lp->cfg.mode; + p->seqid = 0; + lp->rcv_nxt = 0; + + ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n", + p->env, p->seqid); + + return send_tx_packet(lp, p, new_tail); + } + return -EBUSY; +} + +static int send_rtr(struct ldc_channel *lp) +{ + struct ldc_packet *p; + unsigned long new_tail; + + p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0, + &new_tail); + if (p) { + p->env = lp->cfg.mode; + p->seqid = 0; + + ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n", + p->env, p->seqid); + + return send_tx_packet(lp, p, new_tail); + } + return -EBUSY; +} + +static int send_rdx(struct ldc_channel *lp) +{ + struct ldc_packet *p; + unsigned long new_tail; + + p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0, + &new_tail); + if (p) { + p->env = 0; + p->seqid = ++lp->snd_nxt; + p->u.r.ackid = lp->rcv_nxt; + + ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n", + p->env, p->seqid, p->u.r.ackid); + + return send_tx_packet(lp, p, new_tail); + } + return -EBUSY; +} + +static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt) +{ + struct ldc_packet *p; + unsigned long new_tail; + int err; + + p = data_get_tx_packet(lp, &new_tail); + if (!p) + return -EBUSY; + memset(p, 0, sizeof(*p)); + p->type = data_pkt->type; + p->stype = LDC_NACK; + p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK; + p->seqid = lp->snd_nxt + 1; + p->u.r.ackid = lp->rcv_nxt; + + ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n", + p->type, p->ctrl, p->seqid, p->u.r.ackid); + + err = send_tx_packet(lp, p, new_tail); + if (!err) + lp->snd_nxt++; + + return err; +} + +static int ldc_abort(struct ldc_channel *lp, const char *msg) +{ + unsigned long hv_err; + + ldcdbg(STATE, "ABORT[%s]\n", msg); + ldc_print(lp); + + /* We report but do not act upon the hypervisor errors because + * there really isn't much we can do if they fail at this point. + */ + hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries); + if (hv_err) + printk(KERN_ERR PFX "ldc_abort: " + "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n", + lp->id, lp->tx_ra, lp->tx_num_entries, hv_err); + + hv_err = sun4v_ldc_tx_get_state(lp->id, + &lp->tx_head, + &lp->tx_tail, + &lp->chan_state); + if (hv_err) + printk(KERN_ERR PFX "ldc_abort: " + "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n", + lp->id, hv_err); + + hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries); + if (hv_err) + printk(KERN_ERR PFX "ldc_abort: " + "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n", + lp->id, lp->rx_ra, lp->rx_num_entries, hv_err); + + /* Refetch the RX queue state as well, because we could be invoked + * here in the queue processing context. + */ + hv_err = sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); + if (hv_err) + printk(KERN_ERR PFX "ldc_abort: " + "sun4v_ldc_rx_get_state(%lx,...) 
failed, err=%lu\n", + lp->id, hv_err); + + return -ECONNRESET; +} + +static struct ldc_version *find_by_major(u16 major) +{ + struct ldc_version *ret = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(ver_arr); i++) { + struct ldc_version *v = &ver_arr[i]; + if (v->major <= major) { + ret = v; + break; + } + } + return ret; +} + +static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp) +{ + struct ldc_version *vap; + int err; + + ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n", + vp->major, vp->minor); + + if (lp->hs_state == LDC_HS_GOTVERS) { + lp->hs_state = LDC_HS_OPEN; + memset(&lp->ver, 0, sizeof(lp->ver)); + } + + vap = find_by_major(vp->major); + if (!vap) { + err = send_version_nack(lp, 0, 0); + } else if (vap->major != vp->major) { + err = send_version_nack(lp, vap->major, vap->minor); + } else { + struct ldc_version ver = *vp; + if (ver.minor > vap->minor) + ver.minor = vap->minor; + err = send_version_ack(lp, &ver); + if (!err) { + lp->ver = ver; + lp->hs_state = LDC_HS_GOTVERS; + } + } + if (err) + return LDC_ABORT(lp); + + return 0; +} + +static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp) +{ + ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n", + vp->major, vp->minor); + + if (lp->hs_state == LDC_HS_GOTVERS) { + if (lp->ver.major != vp->major || + lp->ver.minor != vp->minor) + return LDC_ABORT(lp); + } else { + lp->ver = *vp; + lp->hs_state = LDC_HS_GOTVERS; + } + if (send_rts(lp)) + return LDC_ABORT(lp); + return 0; +} + +static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp) +{ + struct ldc_version *vap; + struct ldc_packet *p; + unsigned long new_tail; + + if (vp->major == 0 && vp->minor == 0) + return LDC_ABORT(lp); + + vap = find_by_major(vp->major); + if (!vap) + return LDC_ABORT(lp); + + p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS, + vap, sizeof(*vap), + &new_tail); + if (!p) + return LDC_ABORT(lp); + + return send_tx_packet(lp, p, new_tail); +} + +static int process_version(struct ldc_channel *lp, + struct ldc_packet *p) +{ + struct ldc_version *vp; + + vp = (struct ldc_version *) p->u.u_data; + + switch (p->stype) { + case LDC_INFO: + return process_ver_info(lp, vp); + + case LDC_ACK: + return process_ver_ack(lp, vp); + + case LDC_NACK: + return process_ver_nack(lp, vp); + + default: + return LDC_ABORT(lp); + } +} + +static int process_rts(struct ldc_channel *lp, + struct ldc_packet *p) +{ + ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n", + p->stype, p->seqid, p->env); + + if (p->stype != LDC_INFO || + lp->hs_state != LDC_HS_GOTVERS || + p->env != lp->cfg.mode) + return LDC_ABORT(lp); + + lp->snd_nxt = p->seqid; + lp->rcv_nxt = p->seqid; + lp->hs_state = LDC_HS_SENTRTR; + if (send_rtr(lp)) + return LDC_ABORT(lp); + + return 0; +} + +static int process_rtr(struct ldc_channel *lp, + struct ldc_packet *p) +{ + ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n", + p->stype, p->seqid, p->env); + + if (p->stype != LDC_INFO || + p->env != lp->cfg.mode) + return LDC_ABORT(lp); + + lp->snd_nxt = p->seqid; + lp->hs_state = LDC_HS_COMPLETE; + ldc_set_state(lp, LDC_STATE_CONNECTED); + send_rdx(lp); + + return LDC_EVENT_UP; +} + +static int rx_seq_ok(struct ldc_channel *lp, u32 seqid) +{ + return lp->rcv_nxt + 1 == seqid; +} + +static int process_rdx(struct ldc_channel *lp, + struct ldc_packet *p) +{ + ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n", + p->stype, p->seqid, p->env, p->u.r.ackid); + + if (p->stype != LDC_INFO || + !(rx_seq_ok(lp, p->seqid))) + return LDC_ABORT(lp); + + lp->rcv_nxt 
= p->seqid; + + lp->hs_state = LDC_HS_COMPLETE; + ldc_set_state(lp, LDC_STATE_CONNECTED); + + return LDC_EVENT_UP; +} + +static int process_control_frame(struct ldc_channel *lp, + struct ldc_packet *p) +{ + switch (p->ctrl) { + case LDC_VERS: + return process_version(lp, p); + + case LDC_RTS: + return process_rts(lp, p); + + case LDC_RTR: + return process_rtr(lp, p); + + case LDC_RDX: + return process_rdx(lp, p); + + default: + return LDC_ABORT(lp); + } +} + +static int process_error_frame(struct ldc_channel *lp, + struct ldc_packet *p) +{ + return LDC_ABORT(lp); +} + +static int process_data_ack(struct ldc_channel *lp, + struct ldc_packet *ack) +{ + unsigned long head = lp->tx_acked; + u32 ackid = ack->u.r.ackid; + + while (1) { + struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE); + + head = tx_advance(lp, head); + + if (p->seqid == ackid) { + lp->tx_acked = head; + return 0; + } + if (head == lp->tx_tail) + return LDC_ABORT(lp); + } + + return 0; +} + +static void send_events(struct ldc_channel *lp, unsigned int event_mask) +{ + if (event_mask & LDC_EVENT_RESET) + lp->cfg.event(lp->event_arg, LDC_EVENT_RESET); + if (event_mask & LDC_EVENT_UP) + lp->cfg.event(lp->event_arg, LDC_EVENT_UP); + if (event_mask & LDC_EVENT_DATA_READY) + lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY); +} + +static irqreturn_t ldc_rx(int irq, void *dev_id) +{ + struct ldc_channel *lp = dev_id; + unsigned long orig_state, flags; + unsigned int event_mask; + + spin_lock_irqsave(&lp->lock, flags); + + orig_state = lp->chan_state; + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. + */ + sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); + + ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", + orig_state, lp->chan_state, lp->rx_head, lp->rx_tail); + + event_mask = 0; + + if (lp->cfg.mode == LDC_MODE_RAW && + lp->chan_state == LDC_CHANNEL_UP) { + lp->hs_state = LDC_HS_COMPLETE; + ldc_set_state(lp, LDC_STATE_CONNECTED); + + /* + * Generate an LDC_EVENT_UP event if the channel + * was not already up. + */ + if (orig_state != LDC_CHANNEL_UP) { + event_mask |= LDC_EVENT_UP; + orig_state = lp->chan_state; + } + } + + /* If we are in reset state, flush the RX queue and ignore + * everything. + */ + if (lp->flags & LDC_FLAG_RESET) { + (void) ldc_rx_reset(lp); + goto out; + } + + /* Once we finish the handshake, we let the ldc_read() + * paths do all of the control frame and state management. + * Just trigger the callback. 
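+ * The event bits collected in event_mask are handed to the callback via send_events() once the channel lock has been dropped.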
+ */ + if (lp->hs_state == LDC_HS_COMPLETE) { +handshake_complete: + if (lp->chan_state != orig_state) { + unsigned int event = LDC_EVENT_RESET; + + if (lp->chan_state == LDC_CHANNEL_UP) + event = LDC_EVENT_UP; + + event_mask |= event; + } + if (lp->rx_head != lp->rx_tail) + event_mask |= LDC_EVENT_DATA_READY; + + goto out; + } + + if (lp->chan_state != orig_state) + goto out; + + while (lp->rx_head != lp->rx_tail) { + struct ldc_packet *p; + unsigned long new; + int err; + + p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE); + + switch (p->type) { + case LDC_CTRL: + err = process_control_frame(lp, p); + if (err > 0) + event_mask |= err; + break; + + case LDC_DATA: + event_mask |= LDC_EVENT_DATA_READY; + err = 0; + break; + + case LDC_ERR: + err = process_error_frame(lp, p); + break; + + default: + err = LDC_ABORT(lp); + break; + } + + if (err < 0) + break; + + new = lp->rx_head; + new += LDC_PACKET_SIZE; + if (new == (lp->rx_num_entries * LDC_PACKET_SIZE)) + new = 0; + lp->rx_head = new; + + err = __set_rx_head(lp, new); + if (err < 0) { + (void) LDC_ABORT(lp); + break; + } + if (lp->hs_state == LDC_HS_COMPLETE) + goto handshake_complete; + } + +out: + spin_unlock_irqrestore(&lp->lock, flags); + + send_events(lp, event_mask); + + return IRQ_HANDLED; +} + +static irqreturn_t ldc_tx(int irq, void *dev_id) +{ + struct ldc_channel *lp = dev_id; + unsigned long flags, orig_state; + unsigned int event_mask = 0; + + spin_lock_irqsave(&lp->lock, flags); + + orig_state = lp->chan_state; + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. + */ + sun4v_ldc_tx_get_state(lp->id, + &lp->tx_head, + &lp->tx_tail, + &lp->chan_state); + + ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", + orig_state, lp->chan_state, lp->tx_head, lp->tx_tail); + + if (lp->cfg.mode == LDC_MODE_RAW && + lp->chan_state == LDC_CHANNEL_UP) { + lp->hs_state = LDC_HS_COMPLETE; + ldc_set_state(lp, LDC_STATE_CONNECTED); + + /* + * Generate an LDC_EVENT_UP event if the channel + * was not already up. + */ + if (orig_state != LDC_CHANNEL_UP) { + event_mask |= LDC_EVENT_UP; + orig_state = lp->chan_state; + } + } + + spin_unlock_irqrestore(&lp->lock, flags); + + send_events(lp, event_mask); + + return IRQ_HANDLED; +} + +/* XXX ldc_alloc() and ldc_free() needs to run under a mutex so + * XXX that addition and removal from the ldc_channel_list has + * XXX atomicity, otherwise the __ldc_channel_exists() check is + * XXX totally pointless as another thread can slip into ldc_alloc() + * XXX and add a channel with the same ID. There also needs to be + * XXX a spinlock for ldc_channel_list. 
+ */ +static HLIST_HEAD(ldc_channel_list); + +static int __ldc_channel_exists(unsigned long id) +{ + struct ldc_channel *lp; + + hlist_for_each_entry(lp, &ldc_channel_list, list) { + if (lp->id == id) + return 1; + } + return 0; +} + +static int alloc_queue(const char *name, unsigned long num_entries, + struct ldc_packet **base, unsigned long *ra) +{ + unsigned long size, order; + void *q; + + size = num_entries * LDC_PACKET_SIZE; + order = get_order(size); + + q = (void *) __get_free_pages(GFP_KERNEL, order); + if (!q) { + printk(KERN_ERR PFX "Alloc of %s queue failed with " + "size=%lu order=%lu\n", name, size, order); + return -ENOMEM; + } + + memset(q, 0, PAGE_SIZE << order); + + *base = q; + *ra = __pa(q); + + return 0; +} + +static void free_queue(unsigned long num_entries, struct ldc_packet *q) +{ + unsigned long size, order; + + if (!q) + return; + + size = num_entries * LDC_PACKET_SIZE; + order = get_order(size); + + free_pages((unsigned long)q, order); +} + +static unsigned long ldc_cookie_to_index(u64 cookie, void *arg) +{ + u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT; + /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */ + + cookie &= ~COOKIE_PGSZ_CODE; + + return (cookie >> (13ULL + (szcode * 3ULL))); +} + +static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie, + unsigned long entry, unsigned long npages) +{ + struct ldc_mtable_entry *base; + unsigned long i, shift; + + shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3; + base = iommu->page_table + entry; + for (i = 0; i < npages; i++) { + if (base->cookie) + sun4v_ldc_revoke(id, cookie + (i << shift), + base->cookie); + base->mte = 0; + } +} + +/* XXX Make this configurable... XXX */ +#define LDC_IOTABLE_SIZE (8 * 1024) + +static int ldc_iommu_init(const char *name, struct ldc_channel *lp) +{ + unsigned long sz, num_tsb_entries, tsbsize, order; + struct ldc_iommu *ldc_iommu = &lp->iommu; + struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table; + struct ldc_mtable_entry *table; + unsigned long hv_err; + int err; + + num_tsb_entries = LDC_IOTABLE_SIZE; + tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry); + spin_lock_init(&ldc_iommu->lock); + + sz = num_tsb_entries / 8; + sz = (sz + 7UL) & ~7UL; + iommu->map = kzalloc(sz, GFP_KERNEL); + if (!iommu->map) { + printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz); + return -ENOMEM; + } + iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT, + NULL, false /* no large pool */, + 1 /* npools */, + true /* skip span boundary check */); + + order = get_order(tsbsize); + + table = (struct ldc_mtable_entry *) + __get_free_pages(GFP_KERNEL, order); + err = -ENOMEM; + if (!table) { + printk(KERN_ERR PFX "Alloc of MTE table failed, " + "size=%lu order=%lu\n", tsbsize, order); + goto out_free_map; + } + + memset(table, 0, PAGE_SIZE << order); + + ldc_iommu->page_table = table; + + hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table), + num_tsb_entries); + err = -EINVAL; + if (hv_err) + goto out_free_table; + + return 0; + +out_free_table: + free_pages((unsigned long) table, order); + ldc_iommu->page_table = NULL; + +out_free_map: + kfree(iommu->map); + iommu->map = NULL; + + return err; +} + +static void ldc_iommu_release(struct ldc_channel *lp) +{ + struct ldc_iommu *ldc_iommu = &lp->iommu; + struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table; + unsigned long num_tsb_entries, tsbsize, order; + + (void) sun4v_ldc_set_map_table(lp->id, 0, 0); + + num_tsb_entries = iommu->poolsize * iommu->nr_pools; + tsbsize = num_tsb_entries * 
sizeof(struct ldc_mtable_entry); + order = get_order(tsbsize); + + free_pages((unsigned long) ldc_iommu->page_table, order); + ldc_iommu->page_table = NULL; + + kfree(iommu->map); + iommu->map = NULL; +} + +struct ldc_channel *ldc_alloc(unsigned long id, + const struct ldc_channel_config *cfgp, + void *event_arg, + const char *name) +{ + struct ldc_channel *lp; + const struct ldc_mode_ops *mops; + unsigned long dummy1, dummy2, hv_err; + u8 mss, *mssbuf; + int err; + + err = -ENODEV; + if (!ldom_domaining_enabled) + goto out_err; + + err = -EINVAL; + if (!cfgp) + goto out_err; + if (!name) + goto out_err; + + switch (cfgp->mode) { + case LDC_MODE_RAW: + mops = &raw_ops; + mss = LDC_PACKET_SIZE; + break; + + case LDC_MODE_UNRELIABLE: + mops = &nonraw_ops; + mss = LDC_PACKET_SIZE - 8; + break; + + case LDC_MODE_STREAM: + mops = &stream_ops; + mss = LDC_PACKET_SIZE - 8 - 8; + break; + + default: + goto out_err; + } + + if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq) + goto out_err; + + hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2); + err = -ENODEV; + if (hv_err == HV_ECHANNEL) + goto out_err; + + err = -EEXIST; + if (__ldc_channel_exists(id)) + goto out_err; + + mssbuf = NULL; + + lp = kzalloc(sizeof(*lp), GFP_KERNEL); + err = -ENOMEM; + if (!lp) + goto out_err; + + spin_lock_init(&lp->lock); + + lp->id = id; + + err = ldc_iommu_init(name, lp); + if (err) + goto out_free_ldc; + + lp->mops = mops; + lp->mss = mss; + + lp->cfg = *cfgp; + if (!lp->cfg.mtu) + lp->cfg.mtu = LDC_DEFAULT_MTU; + + if (lp->cfg.mode == LDC_MODE_STREAM) { + mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL); + if (!mssbuf) { + err = -ENOMEM; + goto out_free_iommu; + } + lp->mssbuf = mssbuf; + } + + lp->event_arg = event_arg; + + /* XXX allow setting via ldc_channel_config to override defaults + * XXX or use some formula based upon mtu + */ + lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES; + lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES; + + err = alloc_queue("TX", lp->tx_num_entries, + &lp->tx_base, &lp->tx_ra); + if (err) + goto out_free_mssbuf; + + err = alloc_queue("RX", lp->rx_num_entries, + &lp->rx_base, &lp->rx_ra); + if (err) + goto out_free_txq; + + lp->flags |= LDC_FLAG_ALLOCED_QUEUES; + + lp->hs_state = LDC_HS_CLOSED; + ldc_set_state(lp, LDC_STATE_INIT); + + INIT_HLIST_NODE(&lp->list); + hlist_add_head(&lp->list, &ldc_channel_list); + + INIT_HLIST_HEAD(&lp->mh_list); + + snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); + snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); + + err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, + lp->rx_irq_name, lp); + if (err) + goto out_free_txq; + + err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, + lp->tx_irq_name, lp); + if (err) { + free_irq(lp->cfg.rx_irq, lp); + goto out_free_txq; + } + + return lp; + +out_free_txq: + free_queue(lp->tx_num_entries, lp->tx_base); + +out_free_mssbuf: + kfree(mssbuf); + +out_free_iommu: + ldc_iommu_release(lp); + +out_free_ldc: + kfree(lp); + +out_err: + return ERR_PTR(err); +} +EXPORT_SYMBOL(ldc_alloc); + +void ldc_unbind(struct ldc_channel *lp) +{ + if (lp->flags & LDC_FLAG_REGISTERED_IRQS) { + free_irq(lp->cfg.rx_irq, lp); + free_irq(lp->cfg.tx_irq, lp); + lp->flags &= ~LDC_FLAG_REGISTERED_IRQS; + } + + if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) { + sun4v_ldc_tx_qconf(lp->id, 0, 0); + sun4v_ldc_rx_qconf(lp->id, 0, 0); + lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES; + } + if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) { + free_queue(lp->tx_num_entries, lp->tx_base); + free_queue(lp->rx_num_entries, lp->rx_base); + lp->flags &= 
~LDC_FLAG_ALLOCED_QUEUES; + } + + ldc_set_state(lp, LDC_STATE_INIT); +} +EXPORT_SYMBOL(ldc_unbind); + +void ldc_free(struct ldc_channel *lp) +{ + ldc_unbind(lp); + hlist_del(&lp->list); + kfree(lp->mssbuf); + ldc_iommu_release(lp); + + kfree(lp); +} +EXPORT_SYMBOL(ldc_free); + +/* Bind the channel. This registers the LDC queues with + * the hypervisor and puts the channel into a pseudo-listening + * state. This does not initiate a handshake, ldc_connect() does + * that. + */ +int ldc_bind(struct ldc_channel *lp) +{ + unsigned long hv_err, flags; + int err = -EINVAL; + + if (lp->state != LDC_STATE_INIT) + return -EINVAL; + + spin_lock_irqsave(&lp->lock, flags); + + enable_irq(lp->cfg.rx_irq); + enable_irq(lp->cfg.tx_irq); + + lp->flags |= LDC_FLAG_REGISTERED_IRQS; + + err = -ENODEV; + hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0); + if (hv_err) + goto out_free_irqs; + + hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries); + if (hv_err) + goto out_free_irqs; + + hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0); + if (hv_err) + goto out_unmap_tx; + + hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries); + if (hv_err) + goto out_unmap_tx; + + lp->flags |= LDC_FLAG_REGISTERED_QUEUES; + + hv_err = sun4v_ldc_tx_get_state(lp->id, + &lp->tx_head, + &lp->tx_tail, + &lp->chan_state); + err = -EBUSY; + if (hv_err) + goto out_unmap_rx; + + lp->tx_acked = lp->tx_head; + + lp->hs_state = LDC_HS_OPEN; + ldc_set_state(lp, LDC_STATE_BOUND); + + if (lp->cfg.mode == LDC_MODE_RAW) { + /* + * There is no handshake in RAW mode, so handshake + * is completed. + */ + lp->hs_state = LDC_HS_COMPLETE; + } + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; + +out_unmap_rx: + lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES; + sun4v_ldc_rx_qconf(lp->id, 0, 0); + +out_unmap_tx: + sun4v_ldc_tx_qconf(lp->id, 0, 0); + +out_free_irqs: + lp->flags &= ~LDC_FLAG_REGISTERED_IRQS; + free_irq(lp->cfg.tx_irq, lp); + free_irq(lp->cfg.rx_irq, lp); + + spin_unlock_irqrestore(&lp->lock, flags); + + return err; +} +EXPORT_SYMBOL(ldc_bind); + +int ldc_connect(struct ldc_channel *lp) +{ + unsigned long flags; + int err; + + if (lp->cfg.mode == LDC_MODE_RAW) + return -EINVAL; + + spin_lock_irqsave(&lp->lock, flags); + + if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || + !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || + lp->hs_state != LDC_HS_OPEN) + err = ((lp->hs_state > LDC_HS_OPEN) ? 
0 : -EINVAL); + else + err = start_handshake(lp); + + spin_unlock_irqrestore(&lp->lock, flags); + + return err; +} +EXPORT_SYMBOL(ldc_connect); + +int ldc_disconnect(struct ldc_channel *lp) +{ + unsigned long hv_err, flags; + int err; + + if (lp->cfg.mode == LDC_MODE_RAW) + return -EINVAL; + + if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || + !(lp->flags & LDC_FLAG_REGISTERED_QUEUES)) + return -EINVAL; + + spin_lock_irqsave(&lp->lock, flags); + + err = -ENODEV; + hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0); + if (hv_err) + goto out_err; + + hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries); + if (hv_err) + goto out_err; + + hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0); + if (hv_err) + goto out_err; + + hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries); + if (hv_err) + goto out_err; + + ldc_set_state(lp, LDC_STATE_BOUND); + lp->hs_state = LDC_HS_OPEN; + lp->flags |= LDC_FLAG_RESET; + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; + +out_err: + sun4v_ldc_tx_qconf(lp->id, 0, 0); + sun4v_ldc_rx_qconf(lp->id, 0, 0); + free_irq(lp->cfg.tx_irq, lp); + free_irq(lp->cfg.rx_irq, lp); + lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS | + LDC_FLAG_REGISTERED_QUEUES); + ldc_set_state(lp, LDC_STATE_INIT); + + spin_unlock_irqrestore(&lp->lock, flags); + + return err; +} +EXPORT_SYMBOL(ldc_disconnect); + +int ldc_state(struct ldc_channel *lp) +{ + return lp->state; +} +EXPORT_SYMBOL(ldc_state); + +void ldc_set_state(struct ldc_channel *lp, u8 state) +{ + ldcdbg(STATE, "STATE (%s) --> (%s)\n", + state_to_str(lp->state), + state_to_str(state)); + + lp->state = state; +} +EXPORT_SYMBOL(ldc_set_state); + +int ldc_mode(struct ldc_channel *lp) +{ + return lp->cfg.mode; +} +EXPORT_SYMBOL(ldc_mode); + +int ldc_rx_reset(struct ldc_channel *lp) +{ + return __set_rx_head(lp, lp->rx_tail); +} +EXPORT_SYMBOL(ldc_rx_reset); + +void __ldc_print(struct ldc_channel *lp, const char *caller) +{ + pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n" + "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n" + "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n" + "\trcv_nxt=%u snd_nxt=%u\n", + caller, lp->id, lp->flags, state_to_str(lp->state), + lp->chan_state, lp->hs_state, + lp->rx_head, lp->rx_tail, lp->rx_num_entries, + lp->tx_head, lp->tx_tail, lp->tx_num_entries, + lp->rcv_nxt, lp->snd_nxt); +} +EXPORT_SYMBOL(__ldc_print); + +static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) +{ + struct ldc_packet *p; + unsigned long new_tail, hv_err; + int err; + + hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail, + &lp->chan_state); + if (unlikely(hv_err)) + return -EBUSY; + + if (unlikely(lp->chan_state != LDC_CHANNEL_UP)) + return LDC_ABORT(lp); + + if (size > LDC_PACKET_SIZE) + return -EMSGSIZE; + + p = data_get_tx_packet(lp, &new_tail); + if (!p) + return -EAGAIN; + + memcpy(p, buf, size); + + err = send_tx_packet(lp, p, new_tail); + if (!err) + err = size; + + return err; +} + +static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size) +{ + struct ldc_packet *p; + unsigned long hv_err, new; + int err; + + if (size < LDC_PACKET_SIZE) + return -EINVAL; + + hv_err = sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); + if (hv_err) + return LDC_ABORT(lp); + + if (lp->chan_state == LDC_CHANNEL_DOWN || + lp->chan_state == LDC_CHANNEL_RESETTING) + return -ECONNRESET; + + if (lp->rx_head == lp->rx_tail) + return 0; + + p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE); + memcpy(buf, p, LDC_PACKET_SIZE); + + new = rx_advance(lp, 
lp->rx_head); + lp->rx_head = new; + + err = __set_rx_head(lp, new); + if (err < 0) + err = -ECONNRESET; + else + err = LDC_PACKET_SIZE; + + return err; +} + +static const struct ldc_mode_ops raw_ops = { + .write = write_raw, + .read = read_raw, +}; + +static int write_nonraw(struct ldc_channel *lp, const void *buf, + unsigned int size) +{ + unsigned long hv_err, tail; + unsigned int copied; + u32 seq; + int err; + + hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail, + &lp->chan_state); + if (unlikely(hv_err)) + return -EBUSY; + + if (unlikely(lp->chan_state != LDC_CHANNEL_UP)) + return LDC_ABORT(lp); + + if (!tx_has_space_for(lp, size)) + return -EAGAIN; + + seq = lp->snd_nxt; + copied = 0; + tail = lp->tx_tail; + while (copied < size) { + struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE); + u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ? + p->u.u_data : + p->u.r.r_data); + int data_len; + + p->type = LDC_DATA; + p->stype = LDC_INFO; + p->ctrl = 0; + + data_len = size - copied; + if (data_len > lp->mss) + data_len = lp->mss; + + BUG_ON(data_len > LDC_LEN); + + p->env = (data_len | + (copied == 0 ? LDC_START : 0) | + (data_len == size - copied ? LDC_STOP : 0)); + + p->seqid = ++seq; + + ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n", + p->type, + p->stype, + p->ctrl, + p->env, + p->seqid); + + memcpy(data, buf, data_len); + buf += data_len; + copied += data_len; + + tail = tx_advance(lp, tail); + } + + err = set_tx_tail(lp, tail); + if (!err) { + lp->snd_nxt = seq; + err = size; + } + + return err; +} + +static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p, + struct ldc_packet *first_frag) +{ + int err; + + if (first_frag) + lp->rcv_nxt = first_frag->seqid - 1; + + err = send_data_nack(lp, p); + if (err) + return err; + + err = ldc_rx_reset(lp); + if (err < 0) + return LDC_ABORT(lp); + + return 0; +} + +static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p) +{ + if (p->stype & LDC_ACK) { + int err = process_data_ack(lp, p); + if (err) + return err; + } + if (p->stype & LDC_NACK) + return LDC_ABORT(lp); + + return 0; +} + +static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head) +{ + unsigned long dummy; + int limit = 1000; + + ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n", + cur_head, lp->rx_head, lp->rx_tail); + while (limit-- > 0) { + unsigned long hv_err; + + hv_err = sun4v_ldc_rx_get_state(lp->id, + &dummy, + &lp->rx_tail, + &lp->chan_state); + if (hv_err) + return LDC_ABORT(lp); + + if (lp->chan_state == LDC_CHANNEL_DOWN || + lp->chan_state == LDC_CHANNEL_RESETTING) + return -ECONNRESET; + + if (cur_head != lp->rx_tail) { + ldcdbg(DATA, "DATA WAIT DONE " + "head[%lx] tail[%lx] chan_state[%lx]\n", + dummy, lp->rx_tail, lp->chan_state); + return 0; + } + + udelay(1); + } + return -EAGAIN; +} + +static int rx_set_head(struct ldc_channel *lp, unsigned long head) +{ + int err = __set_rx_head(lp, head); + + if (err < 0) + return LDC_ABORT(lp); + + lp->rx_head = head; + return 0; +} + +static void send_data_ack(struct ldc_channel *lp) +{ + unsigned long new_tail; + struct ldc_packet *p; + + p = data_get_tx_packet(lp, &new_tail); + if (likely(p)) { + int err; + + memset(p, 0, sizeof(*p)); + p->type = LDC_DATA; + p->stype = LDC_ACK; + p->ctrl = 0; + p->seqid = lp->snd_nxt + 1; + p->u.r.ackid = lp->rcv_nxt; + + err = send_tx_packet(lp, p, new_tail); + if (!err) + lp->snd_nxt++; + } +} + +static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size) +{ + struct ldc_packet 
*first_frag; + unsigned long hv_err, new; + int err, copied; + + hv_err = sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); + if (hv_err) + return LDC_ABORT(lp); + + if (lp->chan_state == LDC_CHANNEL_DOWN || + lp->chan_state == LDC_CHANNEL_RESETTING) + return -ECONNRESET; + + if (lp->rx_head == lp->rx_tail) + return 0; + + first_frag = NULL; + copied = err = 0; + new = lp->rx_head; + while (1) { + struct ldc_packet *p; + int pkt_len; + + BUG_ON(new == lp->rx_tail); + p = lp->rx_base + (new / LDC_PACKET_SIZE); + + ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] " + "rcv_nxt[%08x]\n", + p->type, + p->stype, + p->ctrl, + p->env, + p->seqid, + p->u.r.ackid, + lp->rcv_nxt); + + if (unlikely(!rx_seq_ok(lp, p->seqid))) { + err = rx_bad_seq(lp, p, first_frag); + copied = 0; + break; + } + + if (p->type & LDC_CTRL) { + err = process_control_frame(lp, p); + if (err < 0) + break; + err = 0; + } + + lp->rcv_nxt = p->seqid; + + /* + * If this is a control-only packet, there is nothing + * else to do but advance the rx queue since the packet + * was already processed above. + */ + if (!(p->type & LDC_DATA)) { + new = rx_advance(lp, new); + break; + } + if (p->stype & (LDC_ACK | LDC_NACK)) { + err = data_ack_nack(lp, p); + if (err) + break; + } + if (!(p->stype & LDC_INFO)) { + new = rx_advance(lp, new); + err = rx_set_head(lp, new); + if (err) + break; + goto no_data; + } + + pkt_len = p->env & LDC_LEN; + + /* Every initial packet starts with the START bit set. + * + * Singleton packets will have both START+STOP set. + * + * Fragments will have START set in the first frame, STOP + * set in the last frame, and neither bit set in middle + * frames of the packet. + * + * Therefore if we are at the beginning of a packet and + * we don't see START, or we are in the middle of a fragmented + * packet and do see START, we are unsynchronized and should + * flush the RX queue. + */ + if ((first_frag == NULL && !(p->env & LDC_START)) || + (first_frag != NULL && (p->env & LDC_START))) { + if (!first_frag) + new = rx_advance(lp, new); + + err = rx_set_head(lp, new); + if (err) + break; + + if (!first_frag) + goto no_data; + } + if (!first_frag) + first_frag = p; + + if (pkt_len > size - copied) { + /* User didn't give us a big enough buffer; + * what to do? This is a pretty serious error. + * + * Since we haven't updated the RX ring head to + * consume any of the packets, signal the error + * to the user and just leave the RX ring alone. + * + * This seems the best behavior because this allows + * a user of the LDC layer to start with a small + * RX buffer for ldc_read() calls and use -EMSGSIZE + * as a cue to enlarge its read buffer. + */ + err = -EMSGSIZE; + break; + } + + /* Ok, we are gonna eat this one. */ + new = rx_advance(lp, new); + + memcpy(buf, + (lp->cfg.mode == LDC_MODE_UNRELIABLE ? 
+ p->u.u_data : p->u.r.r_data), pkt_len); + buf += pkt_len; + copied += pkt_len; + + if (p->env & LDC_STOP) + break; + +no_data: + if (new == lp->rx_tail) { + err = rx_data_wait(lp, new); + if (err) + break; + } + } + + if (!err) + err = rx_set_head(lp, new); + + if (err && first_frag) + lp->rcv_nxt = first_frag->seqid - 1; + + if (!err) { + err = copied; + if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE) + send_data_ack(lp); + } + + return err; +} + +static const struct ldc_mode_ops nonraw_ops = { + .write = write_nonraw, + .read = read_nonraw, +}; + +static int write_stream(struct ldc_channel *lp, const void *buf, + unsigned int size) +{ + if (size > lp->cfg.mtu) + size = lp->cfg.mtu; + return write_nonraw(lp, buf, size); +} + +static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size) +{ + if (!lp->mssbuf_len) { + int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu); + if (err < 0) + return err; + + lp->mssbuf_len = err; + lp->mssbuf_off = 0; + } + + if (size > lp->mssbuf_len) + size = lp->mssbuf_len; + memcpy(buf, lp->mssbuf + lp->mssbuf_off, size); + + lp->mssbuf_off += size; + lp->mssbuf_len -= size; + + return size; +} + +static const struct ldc_mode_ops stream_ops = { + .write = write_stream, + .read = read_stream, +}; + +int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size) +{ + unsigned long flags; + int err; + + if (!buf) + return -EINVAL; + + if (!size) + return 0; + + spin_lock_irqsave(&lp->lock, flags); + + if (lp->hs_state != LDC_HS_COMPLETE) + err = -ENOTCONN; + else + err = lp->mops->write(lp, buf, size); + + spin_unlock_irqrestore(&lp->lock, flags); + + return err; +} +EXPORT_SYMBOL(ldc_write); + +int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size) +{ + unsigned long flags; + int err; + + ldcdbg(RX, "%s: entered size=%d\n", __func__, size); + + if (!buf) + return -EINVAL; + + if (!size) + return 0; + + spin_lock_irqsave(&lp->lock, flags); + + if (lp->hs_state != LDC_HS_COMPLETE) + err = -ENOTCONN; + else + err = lp->mops->read(lp, buf, size); + + spin_unlock_irqrestore(&lp->lock, flags); + + ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__, + lp->cfg.mode, lp->rx_head, lp->rx_tail, err); + + return err; +} +EXPORT_SYMBOL(ldc_read); + +static u64 pagesize_code(void) +{ + switch (PAGE_SIZE) { + default: + case (8ULL * 1024ULL): + return 0; + case (64ULL * 1024ULL): + return 1; + case (512ULL * 1024ULL): + return 2; + case (4ULL * 1024ULL * 1024ULL): + return 3; + case (32ULL * 1024ULL * 1024ULL): + return 4; + case (256ULL * 1024ULL * 1024ULL): + return 5; + } +} + +static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset) +{ + return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) | + (index << PAGE_SHIFT) | + page_offset); +} + + +static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu, + unsigned long npages) +{ + long entry; + + entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table, + npages, NULL, (unsigned long)-1, 0); + if (unlikely(entry == IOMMU_ERROR_CODE)) + return NULL; + + return iommu->page_table + entry; +} + +static u64 perm_to_mte(unsigned int map_perm) +{ + u64 mte_base; + + mte_base = pagesize_code(); + + if (map_perm & LDC_MAP_SHADOW) { + if (map_perm & LDC_MAP_R) + mte_base |= LDC_MTE_COPY_R; + if (map_perm & LDC_MAP_W) + mte_base |= LDC_MTE_COPY_W; + } + if (map_perm & LDC_MAP_DIRECT) { + if (map_perm & LDC_MAP_R) + mte_base |= LDC_MTE_READ; + if (map_perm & LDC_MAP_W) + mte_base |= LDC_MTE_WRITE; + if (map_perm & LDC_MAP_X) + mte_base |= LDC_MTE_EXEC; + } + if 
(map_perm & LDC_MAP_IO) { + if (map_perm & LDC_MAP_R) + mte_base |= LDC_MTE_IOMMU_R; + if (map_perm & LDC_MAP_W) + mte_base |= LDC_MTE_IOMMU_W; + } + + return mte_base; +} + +static int pages_in_region(unsigned long base, long len) +{ + int count = 0; + + do { + unsigned long new = (base + PAGE_SIZE) & PAGE_MASK; + + len -= (new - base); + base = new; + count++; + } while (len > 0); + + return count; +} + +struct cookie_state { + struct ldc_mtable_entry *page_table; + struct ldc_trans_cookie *cookies; + u64 mte_base; + u64 prev_cookie; + u32 pte_idx; + u32 nc; +}; + +static void fill_cookies(struct cookie_state *sp, unsigned long pa, + unsigned long off, unsigned long len) +{ + do { + unsigned long tlen, new = pa + PAGE_SIZE; + u64 this_cookie; + + sp->page_table[sp->pte_idx].mte = sp->mte_base | pa; + + tlen = PAGE_SIZE; + if (off) + tlen = PAGE_SIZE - off; + if (tlen > len) + tlen = len; + + this_cookie = make_cookie(sp->pte_idx, + pagesize_code(), off); + + off = 0; + + if (this_cookie == sp->prev_cookie) { + sp->cookies[sp->nc - 1].cookie_size += tlen; + } else { + sp->cookies[sp->nc].cookie_addr = this_cookie; + sp->cookies[sp->nc].cookie_size = tlen; + sp->nc++; + } + sp->prev_cookie = this_cookie + tlen; + + sp->pte_idx++; + + len -= tlen; + pa = new; + } while (len > 0); +} + +static int sg_count_one(struct scatterlist *sg) +{ + unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT; + long len = sg->length; + + if ((sg->offset | len) & (8UL - 1)) + return -EFAULT; + + return pages_in_region(base + sg->offset, len); +} + +static int sg_count_pages(struct scatterlist *sg, int num_sg) +{ + int count; + int i; + + count = 0; + for (i = 0; i < num_sg; i++) { + int err = sg_count_one(sg + i); + if (err < 0) + return err; + count += err; + } + + return count; +} + +int ldc_map_sg(struct ldc_channel *lp, + struct scatterlist *sg, int num_sg, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + unsigned long i, npages; + struct ldc_mtable_entry *base; + struct cookie_state state; + struct ldc_iommu *iommu; + int err; + struct scatterlist *s; + + if (map_perm & ~LDC_MAP_ALL) + return -EINVAL; + + err = sg_count_pages(sg, num_sg); + if (err < 0) + return err; + + npages = err; + if (err > ncookies) + return -EMSGSIZE; + + iommu = &lp->iommu; + + base = alloc_npages(iommu, npages); + + if (!base) + return -ENOMEM; + + state.page_table = iommu->page_table; + state.cookies = cookies; + state.mte_base = perm_to_mte(map_perm); + state.prev_cookie = ~(u64)0; + state.pte_idx = (base - iommu->page_table); + state.nc = 0; + + for_each_sg(sg, s, num_sg, i) { + fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT, + s->offset, s->length); + } + + return state.nc; +} +EXPORT_SYMBOL(ldc_map_sg); + +int ldc_map_single(struct ldc_channel *lp, + void *buf, unsigned int len, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + unsigned long npages, pa; + struct ldc_mtable_entry *base; + struct cookie_state state; + struct ldc_iommu *iommu; + + if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1)) + return -EINVAL; + + pa = __pa(buf); + if ((pa | len) & (8UL - 1)) + return -EFAULT; + + npages = pages_in_region(pa, len); + + iommu = &lp->iommu; + + base = alloc_npages(iommu, npages); + + if (!base) + return -ENOMEM; + + state.page_table = iommu->page_table; + state.cookies = cookies; + state.mte_base = perm_to_mte(map_perm); + state.prev_cookie = ~(u64)0; + state.pte_idx = (base - iommu->page_table); + state.nc = 0; + fill_cookies(&state, (pa & 
PAGE_MASK), (pa & ~PAGE_MASK), len); + BUG_ON(state.nc > ncookies); + + return state.nc; +} +EXPORT_SYMBOL(ldc_map_single); + + +static void free_npages(unsigned long id, struct ldc_iommu *iommu, + u64 cookie, u64 size) +{ + unsigned long npages, entry; + + npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT; + + entry = ldc_cookie_to_index(cookie, iommu); + ldc_demap(iommu, id, cookie, entry, npages); + iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry); +} + +void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies, + int ncookies) +{ + struct ldc_iommu *iommu = &lp->iommu; + int i; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + for (i = 0; i < ncookies; i++) { + u64 addr = cookies[i].cookie_addr; + u64 size = cookies[i].cookie_size; + + free_npages(lp->id, iommu, addr, size); + } + spin_unlock_irqrestore(&iommu->lock, flags); +} +EXPORT_SYMBOL(ldc_unmap); + +int ldc_copy(struct ldc_channel *lp, int copy_dir, + void *buf, unsigned int len, unsigned long offset, + struct ldc_trans_cookie *cookies, int ncookies) +{ + unsigned int orig_len; + unsigned long ra; + int i; + + if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) { + printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n", + lp->id, copy_dir); + return -EINVAL; + } + + ra = __pa(buf); + if ((ra | len | offset) & (8UL - 1)) { + printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer " + "ra[%lx] len[%x] offset[%lx]\n", + lp->id, ra, len, offset); + return -EFAULT; + } + + if (lp->hs_state != LDC_HS_COMPLETE || + (lp->flags & LDC_FLAG_RESET)) { + printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] " + "flags[%x]\n", lp->id, lp->hs_state, lp->flags); + return -ECONNRESET; + } + + orig_len = len; + for (i = 0; i < ncookies; i++) { + unsigned long cookie_raddr = cookies[i].cookie_addr; + unsigned long this_len = cookies[i].cookie_size; + unsigned long actual_len; + + if (unlikely(offset)) { + unsigned long this_off = offset; + + if (this_off > this_len) + this_off = this_len; + + offset -= this_off; + this_len -= this_off; + if (!this_len) + continue; + cookie_raddr += this_off; + } + + if (this_len > len) + this_len = len; + + while (1) { + unsigned long hv_err; + + hv_err = sun4v_ldc_copy(lp->id, copy_dir, + cookie_raddr, ra, + this_len, &actual_len); + if (unlikely(hv_err)) { + printk(KERN_ERR PFX "ldc_copy: ID[%lu] " + "HV error %lu\n", + lp->id, hv_err); + if (lp->hs_state != LDC_HS_COMPLETE || + (lp->flags & LDC_FLAG_RESET)) + return -ECONNRESET; + else + return -EFAULT; + } + + cookie_raddr += actual_len; + ra += actual_len; + len -= actual_len; + if (actual_len == this_len) + break; + + this_len -= actual_len; + } + + if (!len) + break; + } + + /* It is caller policy what to do about short copies. + * For example, a networking driver can declare the + * packet a runt and drop it. 
+ */ + + return orig_len - len; +} +EXPORT_SYMBOL(ldc_copy); + +void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len, + struct ldc_trans_cookie *cookies, int *ncookies, + unsigned int map_perm) +{ + void *buf; + int err; + + if (len & (8UL - 1)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(len, GFP_ATOMIC); + if (!buf) + return ERR_PTR(-ENOMEM); + + err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm); + if (err < 0) { + kfree(buf); + return ERR_PTR(err); + } + *ncookies = err; + + return buf; +} +EXPORT_SYMBOL(ldc_alloc_exp_dring); + +void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len, + struct ldc_trans_cookie *cookies, int ncookies) +{ + ldc_unmap(lp, cookies, ncookies); + kfree(buf); +} +EXPORT_SYMBOL(ldc_free_exp_dring); + +static int __init ldc_init(void) +{ + unsigned long major, minor; + struct mdesc_handle *hp; + const u64 *v; + int err; + u64 mp; + + hp = mdesc_grab(); + if (!hp) + return -ENODEV; + + mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); + err = -ENODEV; + if (mp == MDESC_NODE_NULL) + goto out; + + v = mdesc_get_property(hp, mp, "domaining-enabled", NULL); + if (!v) + goto out; + + major = 1; + minor = 0; + if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) { + printk(KERN_INFO PFX "Could not register LDOM hvapi.\n"); + goto out; + } + + printk(KERN_INFO "%s", version); + + if (!*v) { + printk(KERN_INFO PFX "Domaining disabled.\n"); + goto out; + } + ldom_domaining_enabled = 1; + err = 0; + +out: + mdesc_release(hp); + return err; +} + +core_initcall(ldc_init); diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c new file mode 100644 index 000000000..ab657b359 --- /dev/null +++ b/arch/sparc/kernel/led.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define LED_MAX_LENGTH 8 /* maximum chars written to proc file */ + +static inline void led_toggle(void) +{ + unsigned char val = get_auxio(); + unsigned char on, off; + + if (val & AUXIO_LED) { + on = 0; + off = AUXIO_LED; + } else { + on = AUXIO_LED; + off = 0; + } + + set_auxio(on, off); +} + +static struct timer_list led_blink_timer; +static unsigned long led_blink_timer_timeout; + +static void led_blink(struct timer_list *unused) +{ + unsigned long timeout = led_blink_timer_timeout; + + led_toggle(); + + /* reschedule */ + if (!timeout) { /* blink according to load */ + led_blink_timer.expires = jiffies + + ((1 + (avenrun[0] >> FSHIFT)) * HZ); + } else { /* blink at user specified interval */ + led_blink_timer.expires = jiffies + (timeout * HZ); + } + add_timer(&led_blink_timer); +} + +#ifdef CONFIG_PROC_FS +static int led_proc_show(struct seq_file *m, void *v) +{ + if (get_auxio() & AUXIO_LED) + seq_puts(m, "on\n"); + else + seq_puts(m, "off\n"); + return 0; +} + +static int led_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, led_proc_show, NULL); +} + +static ssize_t led_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char *buf = NULL; + + if (count > LED_MAX_LENGTH) + count = LED_MAX_LENGTH; + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* work around \n when echo'ing into proc */ + if (buf[count - 1] == '\n') + buf[count - 1] = '\0'; + + /* before we change anything we want to stop any running timers, + * otherwise calls such as on will have no persistent effect + */ + 
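+ /* Accepted commands, e.g. from a shell (illustrative usage): + * echo on > /proc/led - turn the LED on + * echo toggle > /proc/led - invert the current LED state + * echo 5 > /proc/led - blink with the given period in seconds + * echo load > /proc/led - blink according to the load average + * Anything else turns the LED off. + */ + 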
del_timer_sync(&led_blink_timer); + + if (!strcmp(buf, "on")) { + auxio_set_led(AUXIO_LED_ON); + } else if (!strcmp(buf, "toggle")) { + led_toggle(); + } else if ((*buf > '0') && (*buf <= '9')) { + led_blink_timer_timeout = simple_strtoul(buf, NULL, 10); + led_blink(&led_blink_timer); + } else if (!strcmp(buf, "load")) { + led_blink_timer_timeout = 0; + led_blink(&led_blink_timer); + } else { + auxio_set_led(AUXIO_LED_OFF); + } + + kfree(buf); + + return count; +} + +static const struct proc_ops led_proc_ops = { + .proc_open = led_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, + .proc_write = led_proc_write, +}; +#endif + +#define LED_VERSION "0.1" + +static int __init led_init(void) +{ + timer_setup(&led_blink_timer, led_blink, 0); + +#ifdef CONFIG_PROC_FS + if (!proc_create("led", 0, NULL, &led_proc_ops)) + return -ENOMEM; +#endif + printk(KERN_INFO + "led: version %s, Lars Kotthoff \n", + LED_VERSION); + + return 0; +} + +static void __exit led_exit(void) +{ + remove_proc_entry("led", NULL); + del_timer_sync(&led_blink_timer); +} + +module_init(led_init); +module_exit(led_exit); + +MODULE_AUTHOR("Lars Kotthoff "); +MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LED_VERSION); diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c new file mode 100644 index 000000000..39229940d --- /dev/null +++ b/arch/sparc/kernel/leon_kernel.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "prom.h" +#include "irq.h" + +struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */ +struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */ + +int leondebug_irq_disable; +int leon_debug_irqout; +static volatile u32 dummy_master_l10_counter; +unsigned long amba_system_id; +static DEFINE_SPINLOCK(leon_irq_lock); + +static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ +static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */ +unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ +unsigned int sparc_leon_eirq; +#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) +#define LEON_IACK (&leon3_irqctrl_regs->iclear) +#define LEON_DO_ACK_HW 1 + +/* Return the last ACKed IRQ by the Extended IRQ controller. It has already + * been (automatically) ACKed when the CPU takes the trap. 
+ */ +static inline unsigned int leon_eirq_get(int cpu) +{ + return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; +} + +/* Handle one or multiple IRQs from the extended interrupt controller */ +static void leon_handle_ext_irq(struct irq_desc *desc) +{ + unsigned int eirq; + struct irq_bucket *p; + int cpu = sparc_leon3_cpuid(); + + eirq = leon_eirq_get(cpu); + p = irq_map[eirq]; + if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ + generic_handle_irq(p->irq); +} + +/* The extended IRQ controller has been found; this function registers it */ +static void leon_eirq_setup(unsigned int eirq) +{ + unsigned long mask, oldmask; + unsigned int veirq; + + if (eirq < 1 || eirq > 0xf) { + printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq); + return; + } + + veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0); + + /* + * Unmask the Extended IRQ; the IRQs routed through the Ext-IRQ + * controller have a mask-bit of their own, so this is safe. + */ + irq_link(veirq); + mask = 1 << eirq; + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask)); + sparc_leon_eirq = eirq; +} + +unsigned long leon_get_irqmask(unsigned int irq) +{ + unsigned long mask; + + if (!irq || ((irq > 0xf) && !sparc_leon_eirq) + || ((irq > 0x1f) && sparc_leon_eirq)) { + printk(KERN_ERR + "leon_get_irqmask: invalid irq number: %d\n", irq); + mask = 0; + } else { + mask = LEON_HARD_INT(irq); + } + return mask; +} + +#ifdef CONFIG_SMP +static int irq_choose_cpu(const struct cpumask *affinity) +{ + cpumask_t mask; + + cpumask_and(&mask, cpu_online_mask, affinity); + if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask)) + return boot_cpu_id; + else + return cpumask_first(&mask); +} +#else +#define irq_choose_cpu(affinity) boot_cpu_id +#endif + +static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest, + bool force) +{ + unsigned long mask, oldmask, flags; + int oldcpu, newcpu; + + mask = (unsigned long)data->chip_data; + oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); + newcpu = irq_choose_cpu(dest); + + if (oldcpu == newcpu) + goto out; + + /* mask off on the old CPU first, then enable on the selected CPU */ + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask)); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +out: + return IRQ_SET_MASK_OK; +} + +static void leon_unmask_irq(struct irq_data *data) +{ + unsigned long mask, oldmask, flags; + int cpu; + + mask = (unsigned long)data->chip_data; + cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +} + +static void leon_mask_irq(struct irq_data *data) +{ + unsigned long mask, oldmask, flags; + int cpu; + + mask = (unsigned long)data->chip_data; + cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +} + +static unsigned int leon_startup_irq(struct irq_data *data) +{ + irq_link(data->irq); + 
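+ /* Startup links the virtual IRQ into the irq_map[] table consulted + * by the flow handlers above before unmasking it below, so a request + * that fires as soon as the mask bit is cleared can be dispatched. + */ + 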
leon_unmask_irq(data); + return 0; +} + +static void leon_shutdown_irq(struct irq_data *data) +{ + leon_mask_irq(data); + irq_unlink(data->irq); +} + +/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */ +static void leon_eoi_irq(struct irq_data *data) +{ + unsigned long mask = (unsigned long)data->chip_data; + + if (mask & LEON_DO_ACK_HW) + LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW); +} + +static struct irq_chip leon_irq = { + .name = "leon", + .irq_startup = leon_startup_irq, + .irq_shutdown = leon_shutdown_irq, + .irq_mask = leon_mask_irq, + .irq_unmask = leon_unmask_irq, + .irq_eoi = leon_eoi_irq, + .irq_set_affinity = leon_set_affinity, +}; + +/* + * Build a LEON IRQ for the edge triggered LEON IRQ controller: + * Edge (normal) IRQ - handle_simple_irq, ack=DON'T-CARE, never ack + * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR + * Per-CPU Edge - handle_percpu_irq, ack=0 + */ +unsigned int leon_build_device_irq(unsigned int real_irq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack) +{ + unsigned int irq; + unsigned long mask; + struct irq_desc *desc; + + irq = 0; + mask = leon_get_irqmask(real_irq); + if (mask == 0) + goto out; + + irq = irq_alloc(real_irq, real_irq); + if (irq == 0) + goto out; + + if (do_ack) + mask |= LEON_DO_ACK_HW; + + desc = irq_to_desc(irq); + if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) { + irq_set_chip_and_handler_name(irq, &leon_irq, + flow_handler, name); + irq_set_chip_data(irq, (void *)mask); + } + +out: + return irq; +} + +static unsigned int _leon_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); +} + +void leon_update_virq_handling(unsigned int virq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack) +{ + unsigned long mask = (unsigned long)irq_get_chip_data(virq); + + mask &= ~LEON_DO_ACK_HW; + if (do_ack) + mask |= LEON_DO_ACK_HW; + + irq_set_chip_and_handler_name(virq, &leon_irq, + flow_handler, name); + irq_set_chip_data(virq, (void *)mask); +} + +static u32 leon_cycles_offset(void) +{ + u32 rld, val, ctrl, off; + + rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld); + val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val); + ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); + if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) { + val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val); + off = 2 * rld - val; + } else { + off = rld - val; + } + + return off; +} + +#ifdef CONFIG_SMP + +/* smp clockevent irq */ +static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) +{ + struct clock_event_device *ce; + int cpu = smp_processor_id(); + + leon_clear_profile_irq(cpu); + + if (cpu == boot_cpu_id) + timer_interrupt(irq, NULL); + + ce = &per_cpu(sparc32_clockevent, cpu); + + irq_enter(); + if (ce->event_handler) + ce->event_handler(ce); + irq_exit(); + + return IRQ_HANDLED; +} + +#endif /* CONFIG_SMP */ + +void __init leon_init_timers(void) +{ + int irq, eirq; + struct device_node *rootnp, *np, *nnp; + struct property *pp; + int len; + int icsel; + int ampopts; + int err; + u32 config; + u32 ctrl; + + sparc_config.get_cycles_offset = leon_cycles_offset; + sparc_config.cs_period = 1000000 / HZ; + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + +#ifndef CONFIG_SMP + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + + leondebug_irq_disable = 0; + 
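+ /* The sun4m-style master L10 counter does not exist on LEON, so the + * generic sparc32 timer code is pointed at a zeroed dummy word below; + * reads then return 0 instead of faulting. + */ + 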
leon_debug_irqout = 0; + master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter; + dummy_master_l10_counter = 0; + + rootnp = of_find_node_by_path("/ambapp0"); + if (!rootnp) + goto bad; + + /* Find System ID: GRLIB build ID and optional CHIP ID */ + pp = of_find_property(rootnp, "systemid", &len); + if (pp) + amba_system_id = *(unsigned long *)pp->value; + + /* Find IRQMP IRQ Controller Registers base address, otherwise bail out */ + np = of_find_node_by_name(rootnp, "GAISLER_IRQMP"); + if (!np) { + np = of_find_node_by_name(rootnp, "01_00d"); + if (!np) + goto bad; + } + pp = of_find_property(np, "reg", &len); + if (!pp) + goto bad; + leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value; + + /* Find GPTIMER Timer Registers base address otherwise bail out. */ + nnp = rootnp; + +retry: + np = of_find_node_by_name(nnp, "GAISLER_GPTIMER"); + if (!np) { + np = of_find_node_by_name(nnp, "01_011"); + if (!np) + goto bad; + } + + ampopts = 0; + pp = of_find_property(np, "ampopts", &len); + if (pp) { + ampopts = *(int *)pp->value; + if (ampopts == 0) { + /* Skip this instance; resource already + * allocated by another OS */ + nnp = np; + goto retry; + } + } + + /* Select Timer-Instance on Timer Core. Default is zero */ + leon3_gptimer_idx = ampopts & 0x7; + + pp = of_find_property(np, "reg", &len); + if (pp) + leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **) + pp->value; + pp = of_find_property(np, "interrupts", &len); + if (pp) + leon3_gptimer_irq = *(unsigned int *)pp->value; + + if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq)) + goto bad; + + ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + ctrl | LEON3_GPTIMER_CTRL_PENDING); + ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); + + if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0) + leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING; + else + leon3_gptimer_ackmask = ~0; + + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld, + (((1000000 / HZ) - 1))); + LEON3_BYPASS_STORE_PA( + &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); + + /* + * The IRQ controller may (if implemented) consist of multiple + * IRQ controllers, each mapped on a 4KB boundary. + * Each CPU may be routed to different IRQCTRLs, however + * we assume that all CPUs (in an SMP system) are routed to the + * same IRQ controller, and for non-SMP only one IRQCTRL is + * accessed anyway. + * In AMP systems, Linux must run on CPU0 for the time being. + */ + icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]); + icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf; + leon3_irqctrl_regs += icsel; + + /* Mask all IRQs on boot-cpu IRQ controller */ + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0); + + /* Probe extended IRQ controller */ + eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus) + >> 16) & 0xf; + if (eirq != 0) + leon_eirq_setup(eirq); + +#ifdef CONFIG_SMP + { + unsigned long flags; + + /* + * In SMP, sun4m adds an IPI handler to the IRQ trap handler that + * LEON must never take; sun4d and LEON overwrite the branch + * with a NOP. 
+ */ + local_irq_save(flags); + patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ + local_ops->cache_all(); + local_irq_restore(flags); + } +#endif + + config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); + if (config & (1 << LEON3_GPTIMER_SEPIRQ)) + leon3_gptimer_irq += leon3_gptimer_idx; + else if ((config & LEON3_GPTIMER_TIMERS) > 1) + pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); + +#ifdef CONFIG_SMP + /* Install per-cpu IRQ handler for broadcasted ticker */ + irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, + "per-cpu", 0); + err = request_irq(irq, leon_percpu_timer_ce_interrupt, + IRQF_PERCPU | IRQF_TIMER, "timer", NULL); +#else + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); +#endif + if (err) { + pr_err("Unable to attach timer IRQ%d\n", irq); + prom_halt(); + } + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | + LEON3_GPTIMER_IRQEN); + return; +bad: + printk(KERN_ERR "No Timer/irqctrl found\n"); + BUG(); + return; +} + +static void leon_clear_clock_irq(void) +{ + u32 ctrl; + + ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + ctrl & leon3_gptimer_ackmask); +} + +static void leon_load_profile_irq(int cpu, unsigned int limit) +{ +} + +#ifdef CONFIG_SMP +void leon_clear_profile_irq(int cpu) +{ +} + +void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu) +{ + unsigned long mask, flags, *addr; + mask = leon_get_irqmask(irq_nr); + spin_lock_irqsave(&leon_irq_lock, flags); + addr = (unsigned long *)LEON_IMASK(cpu); + LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +} + +#endif + +void __init leon_init_IRQ(void) +{ + sparc_config.init_timers = leon_init_timers; + sparc_config.build_device_irq = _leon_build_device_irq; + sparc_config.clock_rate = 1000000; + sparc_config.clear_clock_irq = leon_clear_clock_irq; + sparc_config.load_profile_irq = leon_load_profile_irq; +} diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c new file mode 100644 index 000000000..e5e5ff6b9 --- /dev/null +++ b/arch/sparc/kernel/leon_pci.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * leon_pci.c: LEON Host PCI support + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + * Code is partially derived from pcic.c + */ + +#include +#include +#include +#include +#include +#include + +/* The LEON architecture does not rely on a BIOS or bootloader to setup + * PCI for us. The Linux generic routines are used to setup resources, + * reset values of configuration-space register settings are preserved. + * + * PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is + * accessed through a Window which is translated to low 64KB in PCI space, the + * first 4KB is not used so 60KB is available. 
+ */ +void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) +{ + LIST_HEAD(resources); + struct pci_bus *root_bus; + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; + + pci_add_resource_offset(&resources, &info->io_space, + info->io_space.start - 0x1000); + pci_add_resource(&resources, &info->mem_space); + info->busn.flags = IORESOURCE_BUS; + pci_add_resource(&resources, &info->busn); + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = &ofdev->dev; + bridge->sysdata = info; + bridge->busnr = 0; + bridge->ops = info->ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = info->map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + root_bus = bridge->bus; + + /* Assign devices with resources */ + pci_assign_unassigned_resources(); + pci_bus_add_devices(root_bus); +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<<i))) + continue; + + if (res->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c new file mode 100644 index 000000000..e6935d0ac --- /dev/null +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * leon_pci_grpci1.c: GRPCI1 Host PCI driver + * + * Copyright (C) 2013 Aeroflex Gaisler AB + * + * This GRPCI1 driver does not support PCI interrupts taken from + * GPIO pins. Interrupt generation at PCI parity and system error + * detection is by default turned off since some GRPCI1 cores do + * not support detection. It can be turned on from the bootloader + * using the all_pci_errors property. 
+ * + * Contributors: Daniel Hellstrom + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "irq.h" + +/* Enable/Disable Debugging Configuration Space Access */ +#undef GRPCI1_DEBUG_CFGACCESS + +/* + * GRPCI1 APB Register MAP + */ +struct grpci1_regs { + unsigned int cfg_stat; /* 0x00 Configuration / Status */ + unsigned int bar0; /* 0x04 BAR0 (RO) */ + unsigned int page0; /* 0x08 PAGE0 (RO) */ + unsigned int bar1; /* 0x0C BAR1 (RO) */ + unsigned int page1; /* 0x10 PAGE1 */ + unsigned int iomap; /* 0x14 IO Map */ + unsigned int stat_cmd; /* 0x18 PCI Status & Command (RO) */ + unsigned int irq; /* 0x1C Interrupt register */ +}; + +#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) +#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) + +#define PAGE0_BTEN_BIT 0 +#define PAGE0_BTEN (1 << PAGE0_BTEN_BIT) + +#define CFGSTAT_HOST_BIT 13 +#define CFGSTAT_CTO_BIT 8 +#define CFGSTAT_HOST (1 << CFGSTAT_HOST_BIT) +#define CFGSTAT_CTO (1 << CFGSTAT_CTO_BIT) + +#define IRQ_DPE (1 << 9) +#define IRQ_SSE (1 << 8) +#define IRQ_RMA (1 << 7) +#define IRQ_RTA (1 << 6) +#define IRQ_STA (1 << 5) +#define IRQ_DPED (1 << 4) +#define IRQ_INTD (1 << 3) +#define IRQ_INTC (1 << 2) +#define IRQ_INTB (1 << 1) +#define IRQ_INTA (1 << 0) +#define IRQ_DEF_ERRORS (IRQ_RMA | IRQ_RTA | IRQ_STA) +#define IRQ_ALL_ERRORS (IRQ_DPED | IRQ_DEF_ERRORS | IRQ_SSE | IRQ_DPE) +#define IRQ_INTX (IRQ_INTA | IRQ_INTB | IRQ_INTC | IRQ_INTD) +#define IRQ_MASK_BIT 16 + +#define DEF_PCI_ERRORS (PCI_STATUS_SIG_TARGET_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_REC_MASTER_ABORT) +#define ALL_PCI_ERRORS (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | DEF_PCI_ERRORS) + +#define TGT 256 + +struct grpci1_priv { + struct leon_pci_info info; /* must be first in this structure */ + struct grpci1_regs __iomem *regs; /* GRPCI register map */ + struct device *dev; + int pci_err_mask; /* STATUS register error mask */ + int irq; /* LEON irqctrl GRPCI IRQ */ + unsigned char irq_map[4]; /* GRPCI nexus PCI INTX# IRQs */ + unsigned int irq_err; /* GRPCI nexus Virt Error IRQ */ + + /* AHB PCI Windows */ + unsigned long pci_area; /* MEMORY */ + unsigned long pci_area_end; + unsigned long pci_io; /* I/O */ + unsigned long pci_conf; /* CONFIGURATION */ + unsigned long pci_conf_end; + unsigned long pci_io_va; +}; + +static struct grpci1_priv *grpci1priv; + +static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val); + +static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct grpci1_priv *priv = dev->bus->sysdata; + int irq_group; + + /* Use default IRQ decoding on PCI BUS0 according to slot numbering */ + irq_group = slot & 0x3; + pin = ((pin - 1) + irq_group) & 0x3; + + return priv->irq_map[pin]; +} + +static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 *pci_conf, tmp, cfg; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + cfg = REGLOAD(priv->regs->cfg_stat); + REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23)); + + /* do read access */ + pci_conf = (u32 *) (priv->pci_conf | (devfn << 8) | (where & 0xfc)); + tmp = LEON3_BYPASS_LOAD_PA(pci_conf); + + /* check if master abort was received */ + 
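+ /* A configuration read that no device claims terminates with a master + * abort (CFGSTAT_CTO). PCI requires such reads to return all ones, + * which is what the error path below does after clearing the abort + * status in the bridge's PCI status register. + */ + 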
if (REGLOAD(priv->regs->cfg_stat) & CFGSTAT_CTO) { + *val = 0xffffffff; + /* Clear Master abort bit in PCI cfg space (is set) */ + tmp = REGLOAD(priv->regs->stat_cmd); + grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp); + } else { + /* Bus always little endian (unaffected by byte-swapping) */ + *val = swab32(tmp); + } + + return 0; +} + +static int grpci1_cfg_r16(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + if (where & 0x1) + return -EINVAL; + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xffff & (v >> (8 * (where & 0x3))); + return ret; +} + +static int grpci1_cfg_r8(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xff & (v >> (8 * (where & 3))); + + return ret; +} + +static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + unsigned int *pci_conf; + u32 cfg; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + cfg = REGLOAD(priv->regs->cfg_stat); + REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + LEON3_BYPASS_STORE_PA(pci_conf, swab32(val)); + + return 0; +} + +static int grpci1_cfg_w16(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + if (where & 0x1) + return -EINVAL; + ret = grpci1_cfg_r32(priv, bus, devfn, where&~3, &v); + if (ret) + return ret; + v = (v & ~(0xffff << (8 * (where & 0x3)))) | + ((0xffff & val) << (8 * (where & 0x3))); + return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +static int grpci1_cfg_w8(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + if (ret != 0) + return ret; + v = (v & ~(0xff << (8 * (where & 0x3)))) | + ((0xff & val) << (8 * (where & 0x3))); + return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +/* Read from Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci1_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct grpci1_priv *priv = grpci1priv; + unsigned int busno = bus->number; + int ret; + + if (PCI_SLOT(devfn) > 15 || busno > 15) { + *val = ~0; + return 0; + } + + switch (size) { + case 1: + ret = grpci1_cfg_r8(priv, busno, devfn, where, val); + break; + case 2: + ret = grpci1_cfg_r16(priv, busno, devfn, where, val); + break; + case 4: + ret = grpci1_cfg_r32(priv, busno, devfn, where, val); + break; + default: + ret = -EINVAL; + break; + } + +#ifdef GRPCI1_DEBUG_CFGACCESS + printk(KERN_INFO + "grpci1_read_config: [%02x:%02x:%x] ofs=%d val=%x size=%d\n", + busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size); +#endif + + return ret; +} + +/* Write to Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. 
+ */ +static int grpci1_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct grpci1_priv *priv = grpci1priv; + unsigned int busno = bus->number; + + if (PCI_SLOT(devfn) > 15 || busno > 15) + return 0; + +#ifdef GRPCI1_DEBUG_CFGACCESS + printk(KERN_INFO + "grpci1_write_config: [%02x:%02x:%x] ofs=%d size=%d val=%x\n", + busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); +#endif + + switch (size) { + default: + return -EINVAL; + case 1: + return grpci1_cfg_w8(priv, busno, devfn, where, val); + case 2: + return grpci1_cfg_w16(priv, busno, devfn, where, val); + case 4: + return grpci1_cfg_w32(priv, busno, devfn, where, val); + } +} + +static struct pci_ops grpci1_ops = { + .read = grpci1_read_config, + .write = grpci1_write_config, +}; + +/* GENIRQ IRQ chip implementation for grpci1 irqmode=0..2. In configuration + * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller + * this is not needed and the standard IRQ controller can be used. + */ + +static void grpci1_mask_irq(struct irq_data *data) +{ + u32 irqidx; + struct grpci1_priv *priv = grpci1priv; + + irqidx = (u32)data->chip_data - 1; + if (irqidx > 3) /* only mask PCI interrupts here */ + return; + irqidx += IRQ_MASK_BIT; + + REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) & ~(1 << irqidx)); +} + +static void grpci1_unmask_irq(struct irq_data *data) +{ + u32 irqidx; + struct grpci1_priv *priv = grpci1priv; + + irqidx = (u32)data->chip_data - 1; + if (irqidx > 3) /* only unmask PCI interrupts here */ + return; + irqidx += IRQ_MASK_BIT; + + REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) | (1 << irqidx)); +} + +static unsigned int grpci1_startup_irq(struct irq_data *data) +{ + grpci1_unmask_irq(data); + return 0; +} + +static void grpci1_shutdown_irq(struct irq_data *data) +{ + grpci1_mask_irq(data); +} + +static struct irq_chip grpci1_irq = { + .name = "grpci1", + .irq_startup = grpci1_startup_irq, + .irq_shutdown = grpci1_shutdown_irq, + .irq_mask = grpci1_mask_irq, + .irq_unmask = grpci1_unmask_irq, +}; + +/* Handle one or multiple IRQs from the PCI core */ +static void grpci1_pci_flow_irq(struct irq_desc *desc) +{ + struct grpci1_priv *priv = grpci1priv; + int i, ack = 0; + unsigned int irqreg; + + irqreg = REGLOAD(priv->regs->irq); + irqreg = (irqreg >> IRQ_MASK_BIT) & irqreg; + + /* Error Interrupt? */ + if (irqreg & IRQ_ALL_ERRORS) { + generic_handle_irq(priv->irq_err); + ack = 1; + } + + /* PCI Interrupt? */ + if (irqreg & IRQ_INTX) { + /* Call respective PCI Interrupt handler */ + for (i = 0; i < 4; i++) { + if (irqreg & (1 << i)) + generic_handle_irq(priv->irq_map[i]); + } + ack = 1; + } + + /* + * Call "first level" IRQ chip end-of-irq handler. 
It will ACK LEON IRQ + * Controller, this must be done after IRQ sources have been handled to + * avoid double IRQ generation + */ + if (ack) + desc->irq_data.chip->irq_eoi(&desc->irq_data); +} + +/* Create a virtual IRQ */ +static unsigned int grpci1_build_device_irq(unsigned int irq) +{ + unsigned int virq = 0, pil; + + pil = 1 << 8; + virq = irq_alloc(irq, pil); + if (virq == 0) + goto out; + + irq_set_chip_and_handler_name(virq, &grpci1_irq, handle_simple_irq, + "pcilvl"); + irq_set_chip_data(virq, (void *)irq); + +out: + return virq; +} + +/* + * Initialize mappings AMBA<->PCI, clear IRQ state, setup PCI interface + * + * Target BARs: + * BAR0: unused in this implementation + * BAR1: peripheral DMA to host's memory (size at least 256MByte) + * BAR2..BAR5: not implemented in hardware + */ +static void grpci1_hw_init(struct grpci1_priv *priv) +{ + u32 ahbadr, bar_sz, data, pciadr; + struct grpci1_regs __iomem *regs = priv->regs; + + /* set 1:1 mapping between AHB -> PCI memory space */ + REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000); + + /* map PCI accesses to target BAR1 to Linux kernel memory 1:1 */ + ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN((unsigned long) &_end)); + REGSTORE(regs->page1, ahbadr); + + /* translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ + REGSTORE(regs->iomap, REGLOAD(regs->iomap) & 0x0000ffff); + + /* disable and clear pending interrupts */ + REGSTORE(regs->irq, 0); + + /* Setup BAR0 outside access range so that it does not conflict with + * peripheral DMA. There is no need to set up the PAGE0 register. + */ + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, 0xffffffff); + grpci1_cfg_r32(priv, TGT, 0, PCI_BASE_ADDRESS_0, &bar_sz); + bar_sz = ~bar_sz + 1; + pciadr = priv->pci_area - bar_sz; + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, pciadr); + + /* + * Setup the Host's PCI Target BAR1 for other peripherals to access, + * and do DMA to the host's memory. + */ + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_1, ahbadr); + + /* + * Setup Latency Timer and cache line size. Default cache line + * size will result in poor performance (256 word fetches), 0xff + * will set it according to the max size of the PCI FIFO. 
+ */ + grpci1_cfg_w8(priv, TGT, 0, PCI_CACHE_LINE_SIZE, 0xff); + grpci1_cfg_w8(priv, TGT, 0, PCI_LATENCY_TIMER, 0x40); + + /* set as bus master, enable pci memory responses, clear status bits */ + grpci1_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); + data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); +} + +static irqreturn_t grpci1_jump_interrupt(int irq, void *arg) +{ + struct grpci1_priv *priv = arg; + dev_err(priv->dev, "Jump IRQ happened\n"); + return IRQ_NONE; +} + +/* Handle GRPCI1 Error Interrupt */ +static irqreturn_t grpci1_err_interrupt(int irq, void *arg) +{ + struct grpci1_priv *priv = arg; + u32 status; + + grpci1_cfg_r16(priv, TGT, 0, PCI_STATUS, &status); + status &= priv->pci_err_mask; + + if (status == 0) + return IRQ_NONE; + + if (status & PCI_STATUS_PARITY) + dev_err(priv->dev, "Data Parity Error\n"); + + if (status & PCI_STATUS_SIG_TARGET_ABORT) + dev_err(priv->dev, "Signalled Target Abort\n"); + + if (status & PCI_STATUS_REC_TARGET_ABORT) + dev_err(priv->dev, "Received Target Abort\n"); + + if (status & PCI_STATUS_REC_MASTER_ABORT) + dev_err(priv->dev, "Received Master Abort\n"); + + if (status & PCI_STATUS_SIG_SYSTEM_ERROR) + dev_err(priv->dev, "Signalled System Error\n"); + + if (status & PCI_STATUS_DETECTED_PARITY) + dev_err(priv->dev, "Parity Error\n"); + + /* Clear handled INT TYPE IRQs */ + grpci1_cfg_w16(priv, TGT, 0, PCI_STATUS, status); + + return IRQ_HANDLED; +} + +static int grpci1_of_probe(struct platform_device *ofdev) +{ + struct grpci1_regs __iomem *regs; + struct grpci1_priv *priv; + int err, len; + const int *tmp; + u32 cfg, size, err_mask; + struct resource *res; + + if (grpci1priv) { + dev_err(&ofdev->dev, "only one GRPCI1 supported\n"); + return -ENODEV; + } + + if (ofdev->num_resources < 3) { + dev_err(&ofdev->dev, "not enough APB/AHB resources\n"); + return -EIO; + } + + priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&ofdev->dev, "memory allocation failed\n"); + return -ENOMEM; + } + platform_set_drvdata(ofdev, priv); + priv->dev = &ofdev->dev; + + /* find device register base address */ + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + /* + * check that we're in Host Slot and that we can act as a Host Bridge + * and not only as target/peripheral. 
+ */ + cfg = REGLOAD(regs->cfg_stat); + if ((cfg & CFGSTAT_HOST) == 0) { + dev_err(&ofdev->dev, "not in host system slot\n"); + return -EIO; + } + + /* check that BAR1 supports 256 MByte so that we can map kernel space */ + REGSTORE(regs->page1, 0xffffffff); + size = ~REGLOAD(regs->page1) + 1; + if (size < 0x10000000) { + dev_err(&ofdev->dev, "BAR1 must be at least 256MByte\n"); + return -EIO; + } + + /* hardware must support little-endian PCI (byte-twisting) */ + if ((REGLOAD(regs->page0) & PAGE0_BTEN) == 0) { + dev_err(&ofdev->dev, "byte-twisting is required\n"); + return -EIO; + } + + priv->regs = regs; + priv->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + dev_info(&ofdev->dev, "host found at 0x%p, irq%d\n", regs, priv->irq); + + /* Find PCI Memory, I/O and Configuration Space Windows */ + priv->pci_area = ofdev->resource[1].start; + priv->pci_area_end = ofdev->resource[1].end+1; + priv->pci_io = ofdev->resource[2].start; + priv->pci_conf = ofdev->resource[2].start + 0x10000; + priv->pci_conf_end = priv->pci_conf + 0x10000; + priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); + if (!priv->pci_io_va) { + dev_err(&ofdev->dev, "unable to map PCI I/O area\n"); + return -EIO; + } + + printk(KERN_INFO + "GRPCI1: MEMORY SPACE [0x%08lx - 0x%08lx]\n" + " I/O SPACE [0x%08lx - 0x%08lx]\n" + " CONFIG SPACE [0x%08lx - 0x%08lx]\n", + priv->pci_area, priv->pci_area_end-1, + priv->pci_io, priv->pci_conf-1, + priv->pci_conf, priv->pci_conf_end-1); + + /* + * I/O Space resources in I/O Window mapped into Virtual Address Space. + * We never use the low 4KB because some devices seem to have problems + * using address 0. + */ + priv->info.io_space.name = "GRPCI1 PCI I/O Space"; + priv->info.io_space.start = priv->pci_io_va + 0x1000; + priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; + priv->info.io_space.flags = IORESOURCE_IO; + + /* + * grpci1 has no prefetchable memory; map everything as + * non-prefetchable memory + */ + priv->info.mem_space.name = "GRPCI1 PCI MEM Space"; + priv->info.mem_space.start = priv->pci_area; + priv->info.mem_space.end = priv->pci_area_end - 1; + priv->info.mem_space.flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) { + dev_err(&ofdev->dev, "unable to request PCI memory area\n"); + err = -ENOMEM; + goto err1; + } + + if (request_resource(&ioport_resource, &priv->info.io_space) < 0) { + dev_err(&ofdev->dev, "unable to request PCI I/O area\n"); + err = -ENOMEM; + goto err2; + } + + /* setup maximum supported PCI buses */ + priv->info.busn.name = "GRPCI1 busn"; + priv->info.busn.start = 0; + priv->info.busn.end = 15; + + grpci1priv = priv; + + /* Initialize hardware */ + grpci1_hw_init(priv); + + /* + * Get the PCI Interrupt to System IRQ mapping and set up IRQ handling + * and the Error IRQ. All PCI and PCI-Error interrupts are shared using + * the same system IRQ. 
+ */ + leon_update_virq_handling(priv->irq, grpci1_pci_flow_irq, "pcilvl", 0); + + priv->irq_map[0] = grpci1_build_device_irq(1); + priv->irq_map[1] = grpci1_build_device_irq(2); + priv->irq_map[2] = grpci1_build_device_irq(3); + priv->irq_map[3] = grpci1_build_device_irq(4); + priv->irq_err = grpci1_build_device_irq(5); + + printk(KERN_INFO " PCI INTA..D#: IRQ%d, IRQ%d, IRQ%d, IRQ%d\n", + priv->irq_map[0], priv->irq_map[1], priv->irq_map[2], + priv->irq_map[3]); + + /* Enable IRQs on LEON IRQ controller */ + err = devm_request_irq(&ofdev->dev, priv->irq, grpci1_jump_interrupt, 0, + "GRPCI1_JUMP", priv); + if (err) { + dev_err(&ofdev->dev, "ERR IRQ request failed: %d\n", err); + goto err3; + } + + /* Setup IRQ handler for access errors */ + err = devm_request_irq(&ofdev->dev, priv->irq_err, + grpci1_err_interrupt, IRQF_SHARED, "GRPCI1_ERR", + priv); + if (err) { + dev_err(&ofdev->dev, "ERR VIRQ request failed: %d\n", err); + goto err3; + } + + tmp = of_get_property(ofdev->dev.of_node, "all_pci_errors", &len); + if (tmp && (len == 4)) { + priv->pci_err_mask = ALL_PCI_ERRORS; + err_mask = IRQ_ALL_ERRORS << IRQ_MASK_BIT; + } else { + priv->pci_err_mask = DEF_PCI_ERRORS; + err_mask = IRQ_DEF_ERRORS << IRQ_MASK_BIT; + } + + /* + * Enable Error Interrupts. PCI interrupts are unmasked once request_irq + * is called by the PCI Device drivers + */ + REGSTORE(regs->irq, err_mask); + + /* Init common layer and scan buses */ + priv->info.ops = &grpci1_ops; + priv->info.map_irq = grpci1_map_irq; + leon_pci_init(ofdev, &priv->info); + + return 0; + +err3: + release_resource(&priv->info.io_space); +err2: + release_resource(&priv->info.mem_space); +err1: + iounmap((void __iomem *)priv->pci_io_va); + grpci1priv = NULL; + return err; +} + +static const struct of_device_id grpci1_of_match[] __initconst = { + { + .name = "GAISLER_PCIFBRG", + }, + { + .name = "01_014", + }, + {}, +}; + +static struct platform_driver grpci1_of_driver = { + .driver = { + .name = "grpci1", + .of_match_table = grpci1_of_match, + }, + .probe = grpci1_of_probe, +}; + +static int __init grpci1_init(void) +{ + return platform_driver_register(&grpci1_of_driver); +} + +subsys_initcall(grpci1_init); diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c new file mode 100644 index 000000000..ca22f93d9 --- /dev/null +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * leon_pci_grpci2.c: GRPCI2 Host PCI driver + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq.h" + +struct grpci2_barcfg { + unsigned long pciadr; /* PCI Space Address */ + unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */ +}; + +/* Device Node Configuration options: + * - barcfgs : Custom Configuration of Host's 6 target BARs + * - irq_mask : Limit which PCI interrupts are enabled + * - do_reset : Force PCI Reset on startup + * + * barcfgs + * ======= + * + * Optional custom Target BAR configuration (see struct grpci2_barcfg). All + * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes) + * + * -1 means not configured (let host driver do default setup). + * + * [i*2+0] = PCI Address of BAR[i] on target interface + * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address + * + * + * irq_mask + * ======== + * + * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. 
By default
+ * all are enabled. Use this when PCI interrupt pins are floating on PCB.
+ * int, len=4.
+ *  bit0 = PCI INTA#
+ *  bit1 = PCI INTB#
+ *  bit2 = PCI INTC#
+ *  bit3 = PCI INTD#
+ *
+ *
+ * reset
+ * =====
+ *
+ * Force PCI reset on startup. int, len=4
+ */
+
+/* Enable Debugging Configuration Space Access */
+#undef GRPCI2_DEBUG_CFGACCESS
+
+/*
+ * GRPCI2 APB Register MAP
+ */
+struct grpci2_regs {
+	unsigned int ctrl;		/* 0x00 Control */
+	unsigned int sts_cap;		/* 0x04 Status / Capabilities */
+	int res1;			/* 0x08 */
+	unsigned int io_map;		/* 0x0C I/O Map address */
+	unsigned int dma_ctrl;		/* 0x10 DMA */
+	unsigned int dma_bdbase;	/* 0x14 DMA */
+	int res2[2];			/* 0x18 */
+	unsigned int bars[6];		/* 0x20 read-only PCI BARs */
+	int res3[2];			/* 0x38 */
+	unsigned int ahbmst_map[16];	/* 0x40 AHB->PCI Map per AHB Master */
+
+	/* PCI Trace Buffer Registers (OPTIONAL) */
+	unsigned int t_ctrl;		/* 0x80 */
+	unsigned int t_cnt;		/* 0x84 */
+	unsigned int t_adpat;		/* 0x88 */
+	unsigned int t_admask;		/* 0x8C */
+	unsigned int t_sigpat;		/* 0x90 */
+	unsigned int t_sigmask;		/* 0x94 */
+	unsigned int t_adstate;		/* 0x98 */
+	unsigned int t_sigstate;	/* 0x9C */
+};
+
+#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+
+#define CTRL_BUS_BIT 16
+
+#define CTRL_RESET (1<<31)
+#define CTRL_SI (1<<27)
+#define CTRL_PE (1<<26)
+#define CTRL_EI (1<<25)
+#define CTRL_ER (1<<24)
+#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
+
+struct grpci2_priv {
+	struct leon_pci_info	info;	/* must be first, bus->sysdata points at it */
+	struct grpci2_regs __iomem *regs;
+	int			irq;
+	int			irq_mode;	/* IRQ Mode from CAPSTS REG */
+	int			bt_enabled;
+	int			do_reset;
+	int			irq_mask;
+	u32			pciid;		/* PCI ID of Host */
+	unsigned int		irq_map[4];
+
+	/* Virtual IRQ numbers */
+	unsigned int		virq_err;
+	unsigned int		virq_dma;
+
+	/* AHB PCI Windows */
+	unsigned long		pci_area;	/* MEMORY */
+	unsigned long		pci_area_end;
+	unsigned long		pci_io;		/* I/O */
+	unsigned long		pci_conf;	/* CONFIGURATION */
+	unsigned long		pci_conf_end;
+	unsigned long		pci_io_va;
+
+	struct grpci2_barcfg	tgtbars[6];
+};
+
+static DEFINE_SPINLOCK(grpci2_dev_lock);
+static struct grpci2_priv *grpci2priv;
+
+static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct grpci2_priv *priv = dev->bus->sysdata;
+	int irq_group;
+
+	/* Use default IRQ decoding on PCI BUS0 according slot numbering */
+	irq_group = slot & 0x3;
+	pin = ((pin - 1) + irq_group) & 0x3;
+
+	return priv->irq_map[pin];
+}
+
+static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
+			  unsigned int devfn, int where, u32 *val)
+{
+	unsigned int *pci_conf;
+	unsigned long flags;
+	u32 tmp;
+
+	if (where & 0x3)
+		return -EINVAL;
+
+	if (bus == 0) {
+		devfn += (0x8 * 6); /* start at AD16=Device0 */
+	} else if (bus == TGT) {
+		bus = 0;
+		devfn = 0; /* special case: bridge controller itself */
+	}
+
+	/* Select bus */
+	spin_lock_irqsave(&grpci2_dev_lock, flags);
+	REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
+				   (bus << 16));
+	spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+
+	/* clear old status */
+	REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
+
+	pci_conf = (unsigned int *) (priv->pci_conf |
+				     (devfn << 8) | (where & 0xfc));
+	tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
+
+	/* Wait until GRPCI2 signals that CFG access is done, it should be
+	 * done instantaneously unless a DMA operation is ongoing...
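+	 * Note that the wait below has no timeout; the driver relies on the
+	 * core always flagging completion, so a wedged access would spin
+	 * here indefinitely.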
+ */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) { + *val = 0xffffffff; + } else { + /* Bus always little endian (unaffected by byte-swapping) */ + *val = swab32(tmp); + } + + return 0; +} + +static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xffff & (v >> (8 * (where & 0x3))); + return ret; +} + +static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xff & (v >> (8 * (where & 3))); + + return ret; +} + +static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + unsigned int *pci_conf; + unsigned long flags; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | + (bus << 16)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); + + /* clear old status */ + REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + LEON3_BYPASS_STORE_PA(pci_conf, swab32(val)); + + /* Wait until GRPCI2 signals that CFG access is done, it should be + * done instantaneously unless a DMA operation is ongoing... + */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + return 0; +} + +static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v); + if (ret) + return ret; + v = (v & ~(0xffff << (8 * (where & 0x3)))) | + ((0xffff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + if (ret != 0) + return ret; + v = (v & ~(0xff << (8 * (where & 0x3)))) | + ((0xff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +/* Read from Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. 
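+ *
+ * Sub-word accesses are synthesized from aligned 32-bit ones:
+ * grpci2_cfg_r16/r8 above shift and mask the containing dword, and
+ * grpci2_cfg_w16/w8 do a read-modify-write. For example, an 8-bit write
+ * to offset 0x0D replaces bits 15:8 of the dword at 0x0C, since
+ * (where & 0x3) == 1 gives a shift of 8.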
+ */ +static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + int ret; + + if (PCI_SLOT(devfn) > 15 || busno > 255) { + *val = ~0; + return 0; + } + + switch (size) { + case 1: + ret = grpci2_cfg_r8(priv, busno, devfn, where, val); + break; + case 2: + ret = grpci2_cfg_r16(priv, busno, devfn, where, val); + break; + case 4: + ret = grpci2_cfg_r32(priv, busno, devfn, where, val); + break; + default: + ret = -EINVAL; + break; + } + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x " + "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, + *val, size); +#endif + + return ret; +} + +/* Write to Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + + if (PCI_SLOT(devfn) > 15 || busno > 255) + return 0; + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d " + "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), + where, size, val); +#endif + + switch (size) { + default: + return -EINVAL; + case 1: + return grpci2_cfg_w8(priv, busno, devfn, where, val); + case 2: + return grpci2_cfg_w16(priv, busno, devfn, where, val); + case 4: + return grpci2_cfg_w32(priv, busno, devfn, where, val); + } +} + +static struct pci_ops grpci2_ops = { + .read = grpci2_read_config, + .write = grpci2_write_config, +}; + +/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration + * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller + * this is not needed and the standard IRQ controller can be used. + */ + +static void grpci2_mask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only mask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static void grpci2_unmask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only unmask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static unsigned int grpci2_startup_irq(struct irq_data *data) +{ + grpci2_unmask_irq(data); + return 0; +} + +static void grpci2_shutdown_irq(struct irq_data *data) +{ + grpci2_mask_irq(data); +} + +static struct irq_chip grpci2_irq = { + .name = "grpci2", + .irq_startup = grpci2_startup_irq, + .irq_shutdown = grpci2_shutdown_irq, + .irq_mask = grpci2_mask_irq, + .irq_unmask = grpci2_unmask_irq, +}; + +/* Handle one or multiple IRQs from the PCI core */ +static void grpci2_pci_flow_irq(struct irq_desc *desc) +{ + struct grpci2_priv *priv = grpci2priv; + int i, ack = 0; + unsigned int ctrl, sts_cap, pci_ints; + + ctrl = REGLOAD(priv->regs->ctrl); + sts_cap = REGLOAD(priv->regs->sts_cap); + + /* Error Interrupt? 
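+	 * Demux order: the error sources first, then the four PCI INTX#
+	 * lines, and finally the DMA interrupt when it shares this line
+	 * (irq_mode == 0). The LEON controller is only EOIed when at least
+	 * one source was handled.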
*/ + if (sts_cap & STS_ERR_IRQ) { + generic_handle_irq(priv->virq_err); + ack = 1; + } + + /* PCI Interrupt? */ + pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT; + if (pci_ints) { + /* Call respective PCI Interrupt handler */ + for (i = 0; i < 4; i++) { + if (pci_ints & (1 << i)) + generic_handle_irq(priv->irq_map[i]); + } + ack = 1; + } + + /* + * Decode DMA Interrupt only when shared with Err and PCI INTX#, when + * the DMA is a unique IRQ the DMA interrupts doesn't end up here, they + * goes directly to DMA ISR. + */ + if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) { + generic_handle_irq(priv->virq_dma); + ack = 1; + } + + /* + * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ + * Controller, this must be done after IRQ sources have been handled to + * avoid double IRQ generation + */ + if (ack) + desc->irq_data.chip->irq_eoi(&desc->irq_data); +} + +/* Create a virtual IRQ */ +static unsigned int grpci2_build_device_irq(unsigned int irq) +{ + unsigned int virq = 0, pil; + + pil = 1 << 8; + virq = irq_alloc(irq, pil); + if (virq == 0) + goto out; + + irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq, + "pcilvl"); + irq_set_chip_data(virq, (void *)irq); + +out: + return virq; +} + +static void grpci2_hw_init(struct grpci2_priv *priv) +{ + u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; + struct grpci2_regs __iomem *regs = priv->regs; + int i; + struct grpci2_barcfg *barcfg = priv->tgtbars; + + /* Reset any earlier setup */ + if (priv->do_reset) { + printk(KERN_INFO "GRPCI2: Resetting PCI bus\n"); + REGSTORE(regs->ctrl, CTRL_RESET); + ssleep(1); /* Wait for boards to settle */ + } + REGSTORE(regs->ctrl, 0); + REGSTORE(regs->sts_cap, ~0); /* Clear Status */ + REGSTORE(regs->dma_ctrl, 0); + REGSTORE(regs->dma_bdbase, 0); + + /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ + REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff); + + /* set 1:1 mapping between AHB -> PCI memory space, for all Masters + * Each AHB master has it's own mapping registers. Max 16 AHB masters. + */ + for (i = 0; i < 16; i++) + REGSTORE(regs->ahbmst_map[i], priv->pci_area); + + /* Get the GRPCI2 Host PCI ID */ + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); + + /* Get address to first (always defined) capability structure */ + grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); + + /* Enable/Disable Byte twisting */ + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); + io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); + + /* Setup the Host's PCI Target BARs for other peripherals to access, + * and do DMA to the host's memory. The target BARs can be sized and + * enabled individually. + * + * User may set custom target BARs, but default is: + * The first BARs is used to map kernel low (DMA is part of normal + * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the + * PCI bus, the other BARs are disabled. We assume that the first BAR + * is always available. 
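+	 *
+	 * For custom entries the BAR size is derived from the alignment of
+	 * the configured PCI address: ((pciadr - 1) & ~pciadr) + 1 isolates
+	 * the lowest set address bit, so e.g. pciadr = 0xA0000000 yields a
+	 * 0x20000000 (512 MByte) BAR.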
+ */
+	for (i = 0; i < 6; i++) {
+		if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
+			/* Target BARs must have the proper alignment */
+			ahbadr = barcfg[i].ahbadr;
+			pciadr = barcfg[i].pciadr;
+			bar_sz = ((pciadr - 1) & ~pciadr) + 1;
+		} else {
+			if (i == 0) {
+				/* Map main memory */
+				bar_sz = 0xf0000008; /* 256MB prefetchable */
+				ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
+					(unsigned long) &_end));
+				pciadr = ahbadr;
+			} else {
+				bar_sz = 0;
+				ahbadr = 0;
+				pciadr = 0;
+			}
+		}
+		grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
+				bar_sz);
+		grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
+		grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+		printk(KERN_INFO "        TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
+			i, pciadr, ahbadr);
+	}
+
+	/* set as bus master and enable pci memory responses */
+	grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
+	data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
+
+	/* Enable Error response (CPU-TRAP) on illegal memory access. */
+	REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
+}
+
+static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
+{
+	printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
+	return IRQ_NONE;
+}
+
+/* Handle GRPCI2 Error Interrupt */
+static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
+{
+	struct grpci2_priv *priv = arg;
+	struct grpci2_regs __iomem *regs = priv->regs;
+	unsigned int status;
+
+	status = REGLOAD(regs->sts_cap);
+	if ((status & STS_ERR_IRQ) == 0)
+		return IRQ_NONE;
+
+	if (status & STS_IPARERR)
+		printk(KERN_ERR "GRPCI2: Parity Error\n");
+
+	if (status & STS_ITGTABRT)
+		printk(KERN_ERR "GRPCI2: Target Abort\n");
+
+	if (status & STS_IMSTABRT)
+		printk(KERN_ERR "GRPCI2: Master Abort\n");
+
+	if (status & STS_ISYSERR)
+		printk(KERN_ERR "GRPCI2: System Error\n");
+
+	/* Clear handled INT TYPE IRQs */
+	REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
+
+	return IRQ_HANDLED;
+}
+
+static int grpci2_of_probe(struct platform_device *ofdev)
+{
+	struct grpci2_regs __iomem *regs;
+	struct grpci2_priv *priv;
+	int err, i, len;
+	const int *tmp;
+	unsigned int capability;
+
+	if (grpci2priv) {
+		printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
+		return -ENODEV;
+	}
+
+	if (ofdev->num_resources < 3) {
+		printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
+		return -EIO;
+	}
+
+	/* Find Device Address */
+	regs = of_ioremap(&ofdev->resource[0], 0,
+			  resource_size(&ofdev->resource[0]),
+			  "grlib-grpci2 regs");
+	if (regs == NULL) {
+		printk(KERN_ERR "GRPCI2: ioremap failed\n");
+		return -EIO;
+	}
+
+	/*
+	 * Check that we're in Host Slot and that we can act as a Host Bridge
+	 * and not only as target.
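+	 * The sts_cap register reflects how the core was strapped at reset;
+	 * a core that is target-only or lacks bus-master capability cannot
+	 * originate configuration cycles, so probing is refused below.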
+ */
+	capability = REGLOAD(regs->sts_cap);
+	if ((capability & STS_HOST) || !(capability & STS_MST)) {
+		printk(KERN_INFO "GRPCI2: not in host system slot\n");
+		err = -EIO;
+		goto err1;
+	}
+
+	priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
+	if (grpci2priv == NULL) {
+		err = -ENOMEM;
+		goto err1;
+	}
+	priv->regs = regs;
+	priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
+	priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
+
+	printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
+
+	/* Byte twisting should be made configurable from kernel command line */
+	priv->bt_enabled = 1;
+
+	/* Let user do custom Target BAR assignment */
+	tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
+	if (tmp && (len == 2*4*6))
+		memcpy(priv->tgtbars, tmp, 2*4*6);
+	else
+		memset(priv->tgtbars, -1, 2*4*6);
+
+	/* Limit IRQ unmasking in irq_mode 2 and 3 */
+	tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
+	if (tmp && (len == 4))
+		priv->irq_mask = *tmp & 0xf;
+	else
+		priv->irq_mask = 0xf;
+
+	/* Optional PCI reset. Force PCI reset on startup */
+	tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
+	if (tmp && (len == 4))
+		priv->do_reset = *tmp;
+	else
+		priv->do_reset = 0;
+
+	/* Find PCI Memory, I/O and Configuration Space Windows */
+	priv->pci_area = ofdev->resource[1].start;
+	priv->pci_area_end = ofdev->resource[1].end+1;
+	priv->pci_io = ofdev->resource[2].start;
+	priv->pci_conf = ofdev->resource[2].start + 0x10000;
+	priv->pci_conf_end = priv->pci_conf + 0x10000;
+	priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
+	if (!priv->pci_io_va) {
+		err = -EIO;
+		goto err2;
+	}
+
+	printk(KERN_INFO
+		"GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
+		"        I/O    SPACE [0x%08lx - 0x%08lx]\n"
+		"        CONFIG SPACE [0x%08lx - 0x%08lx]\n",
+		priv->pci_area, priv->pci_area_end-1,
+		priv->pci_io, priv->pci_conf-1,
+		priv->pci_conf, priv->pci_conf_end-1);
+
+	/*
+	 * I/O Space resources in I/O Window mapped into Virtual Adr Space
+	 * We never use low 4KB because some devices seem to have problems
+	 * using address 0.
+	 */
+	memset(&priv->info.io_space, 0, sizeof(struct resource));
+	priv->info.io_space.name = "GRPCI2 PCI I/O Space";
+	priv->info.io_space.start = priv->pci_io_va + 0x1000;
+	priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
+	priv->info.io_space.flags = IORESOURCE_IO;
+
+	/*
+	 * GRPCI2 has no prefetchable memory, map everything as
+	 * non-prefetchable memory
+	 */
+	memset(&priv->info.mem_space, 0, sizeof(struct resource));
+	priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
+	priv->info.mem_space.start = priv->pci_area;
+	priv->info.mem_space.end = priv->pci_area_end - 1;
+	priv->info.mem_space.flags = IORESOURCE_MEM;
+
+	if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
+		goto err3;
+	if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
+		goto err4;
+
+	/* setup maximum supported PCI buses */
+	priv->info.busn.name = "GRPCI2 busn";
+	priv->info.busn.start = 0;
+	priv->info.busn.end = 255;
+
+	grpci2_hw_init(priv);
+
+	/*
+	 * Get PCI Interrupt to System IRQ mapping and setup IRQ handling
+	 * Error IRQ always on PCI INTA.
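+	 *
+	 * irq_mode 0 and 1 share a single system IRQ for everything and
+	 * demultiplex it in grpci2_pci_flow_irq(), while irq_mode 2 and 3
+	 * give each PCI INTX# its own system IRQ so the generic fasteoi
+	 * flow can be used directly.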
+ */ + if (priv->irq_mode < 2) { + /* All PCI interrupts are shared using the same system IRQ */ + leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq, + "pcilvl", 0); + + priv->irq_map[0] = grpci2_build_device_irq(1); + priv->irq_map[1] = grpci2_build_device_irq(2); + priv->irq_map[2] = grpci2_build_device_irq(3); + priv->irq_map[3] = grpci2_build_device_irq(4); + + priv->virq_err = grpci2_build_device_irq(5); + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[1]; + else + priv->virq_dma = grpci2_build_device_irq(6); + + /* Enable IRQs on LEON IRQ controller */ + err = request_irq(priv->irq, grpci2_jump_interrupt, 0, + "GRPCI2_JUMP", priv); + if (err) + printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n"); + } else { + /* All PCI interrupts have an unique IRQ interrupt */ + for (i = 0; i < 4; i++) { + /* Make LEON IRQ layer handle level IRQ by acking */ + leon_update_virq_handling(ofdev->archdata.irqs[i], + handle_fasteoi_irq, "pcilvl", + 1); + priv->irq_map[i] = ofdev->archdata.irqs[i]; + } + priv->virq_err = priv->irq_map[0]; + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[4]; + else + priv->virq_dma = priv->irq_map[0]; + + /* Unmask all PCI interrupts, request_irq will not do that */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf)); + } + + /* Setup IRQ handler for non-configuration space access errors */ + err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED, + "GRPCI2_ERR", priv); + if (err) { + printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err); + goto err5; + } + + /* + * Enable Error Interrupts. PCI interrupts are unmasked once request_irq + * is called by the PCI Device drivers + */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI); + + /* Init common layer and scan buses */ + priv->info.ops = &grpci2_ops; + priv->info.map_irq = grpci2_map_irq; + leon_pci_init(ofdev, &priv->info); + + return 0; + +err5: + release_resource(&priv->info.io_space); +err4: + release_resource(&priv->info.mem_space); +err3: + err = -ENOMEM; + iounmap((void __iomem *)priv->pci_io_va); +err2: + kfree(priv); +err1: + of_iounmap(&ofdev->resource[0], regs, + resource_size(&ofdev->resource[0])); + return err; +} + +static const struct of_device_id grpci2_of_match[] __initconst = { + { + .name = "GAISLER_GRPCI2", + }, + { + .name = "01_07c", + }, + {}, +}; + +static struct platform_driver grpci2_of_driver = { + .driver = { + .name = "grpci2", + .of_match_table = grpci2_of_match, + }, + .probe = grpci2_of_probe, +}; + +static int __init grpci2_init(void) +{ + return platform_driver_register(&grpci2_of_driver); +} + +subsys_initcall(grpci2_init); diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c new file mode 100644 index 000000000..396f46bca --- /dev/null +++ b/arch/sparc/kernel/leon_pmc.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* leon_pmc.c: LEON Power-down cpu_idle() handler + * + * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + */ + +#include +#include + +#include +#include +#include +#include + +/* List of Systems that need fixup instructions around power-down instruction */ +static unsigned int pmc_leon_fixup_ids[] = { + AEROFLEX_UT699, + GAISLER_GR712RC, + LEON4_NEXTREME1, + 0 +}; + +static int pmc_leon_need_fixup(void) +{ + unsigned int systemid = amba_system_id >> 16; + unsigned int *id; + + id = &pmc_leon_fixup_ids[0]; + while (*id != 0) { + if (*id == systemid) + return 1; + id++; + } + + return 0; +} + +/* + * CPU idle callback function 
for systems that need some extra handling + * See .../arch/sparc/kernel/process.c + */ +static void pmc_leon_idle_fixup(void) +{ + /* Prepare an address to a non-cachable region. APB is always + * none-cachable. One instruction is executed after the Sleep + * instruction, we make sure to read the bus and throw away the + * value by accessing a non-cachable area, also we make sure the + * MMU does not get a TLB miss here by using the MMU BYPASS ASI. + */ + register unsigned int address = (unsigned int)leon3_irqctrl_regs; + + /* Interrupts need to be enabled to not hang the CPU */ + raw_local_irq_enable(); + + __asm__ __volatile__ ( + "wr %%g0, %%asr19\n" + "lda [%0] %1, %%g0\n" + : + : "r"(address), "i"(ASI_LEON_BYPASS)); +} + +/* + * CPU idle callback function + * See .../arch/sparc/kernel/process.c + */ +static void pmc_leon_idle(void) +{ + /* Interrupts need to be enabled to not hang the CPU */ + raw_local_irq_enable(); + + /* For systems without power-down, this will be no-op */ + __asm__ __volatile__ ("wr %g0, %asr19\n\t"); +} + +/* Install LEON Power Down function */ +static int __init leon_pmc_install(void) +{ + if (sparc_cpu_model == sparc_leon) { + /* Assign power management IDLE handler */ + if (pmc_leon_need_fixup()) + sparc_idle = pmc_leon_idle_fixup; + else + sparc_idle = pmc_leon_idle; + + printk(KERN_INFO "leon: power management initialized\n"); + } + + return 0; +} + +/* This driver is not critical to the boot process, don't care + * if initialized late. + */ +late_initcall(leon_pmc_install); diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c new file mode 100644 index 000000000..991e9ad3d --- /dev/null +++ b/arch/sparc/kernel/leon_smp.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* leon_smp.c: Sparc-Leon SMP support. + * + * based on sun4m_smp.c + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +#include "irq.h" + +extern ctxd_t *srmmu_ctx_table_phys; +static int smp_processors_ready; +extern volatile unsigned long cpu_callin_map[NR_CPUS]; +extern cpumask_t smp_commenced_mask; +void leon_configure_cache_smp(void); +static void leon_ipi_init(void); + +/* IRQ number of LEON IPIs */ +int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT; + +static inline unsigned long do_swap(volatile unsigned long *ptr, + unsigned long val) +{ + __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val) + : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS) + : "memory"); + return val; +} + +void leon_cpu_pre_starting(void *arg) +{ + leon_configure_cache_smp(); +} + +void leon_cpu_pre_online(void *arg) +{ + int cpuid = hard_smp_processor_id(); + + /* Allow master to continue. The master will then give us the + * go-ahead by setting the smp_commenced_mask and will wait without + * timeouts until our setup is completed fully (signified by + * our bit being set in the cpu_online_mask). + */ + do_swap(&cpu_callin_map[cpuid], 1); + + local_ops->cache_all(); + local_ops->tlb_all(); + + /* Fix idle thread fields. 
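+	 * On sparc32 %g6 holds the current thread_info pointer, so it is
+	 * reloaded from current_set[] before this CPU starts scheduling.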
 */
+	__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
+			     : "memory" /* paranoid */);
+
+	/* Attach to the address space of init_task. */
+	mmgrab(&init_mm);
+	current->active_mm = &init_mm;
+
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
+		mb();
+}
+
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct linux_prom_registers smp_penguin_ctable;
+
+void leon_configure_cache_smp(void)
+{
+	unsigned long cfg = sparc_leon3_get_dcachecfg();
+	int me = smp_processor_id();
+
+	if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) {
+		printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n",
+		       (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg),
+		       (unsigned int)cfg, (unsigned int)me);
+		sparc_leon3_disable_cache();
+	} else {
+		if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) {
+			sparc_leon3_enable_snooping();
+		} else {
+			printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n",
+			       me);
+			sparc_leon3_disable_cache();
+		}
+	}
+
+	local_ops->cache_all();
+	local_ops->tlb_all();
+}
+
+static void leon_smp_setbroadcast(unsigned int mask)
+{
+	int broadcast =
+	    ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
+	      LEON3_IRQMPSTATUS_BROADCAST) & 1);
+	if (!broadcast) {
+		prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n",
+			    leon_smp_nrcpus());
+		if (leon_smp_nrcpus() > 1) {
+			BUG();
+		} else {
+			prom_printf("continue anyway\n");
+			return;
+		}
+	}
+	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
+}
+
+int leon_smp_nrcpus(void)
+{
+	int nrcpu =
+	    ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
+	      LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1;
+	return nrcpu;
+}
+
+void __init leon_boot_cpus(void)
+{
+	int nrcpu = leon_smp_nrcpus();
+	int me = smp_processor_id();
+
+	/* Setup IPI */
+	leon_ipi_init();
+
+	printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
+	       (unsigned int)nrcpu, (unsigned int)NR_CPUS,
+	       (unsigned int)&(leon3_irqctrl_regs->mpstatus));
+
+	leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
+	leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
+	leon_enable_irq_cpu(leon_ipi_irq, me);
+
+	leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
+
+	leon_configure_cache_smp();
+	local_ops->cache_all();
+
+}
+
+int leon_boot_one_cpu(int i, struct task_struct *idle)
+{
+	int timeout;
+
+	current_set[i] = task_thread_info(idle);
+
+	/* See trampoline.S:leon_smp_cpu_startup for details...
+	 * Initialize the contexts table
+	 * Since the call to prom_startcpu() trashes the structure,
+	 * we need to re-initialize it for each cpu
+	 */
+	smp_penguin_ctable.which_io = 0;
+	smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys;
+	smp_penguin_ctable.reg_size = 0;
+
+	/* whirrr, whirrr, whirrrrrrrrr... */
+	printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
+	       (unsigned int)&leon3_irqctrl_regs->mpstatus);
+	local_ops->cache_all();
+
+	/* Make sure all IRQs are off from the start for this new CPU */
+	LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
+
+	/* Wake one CPU */
+	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
+
+	/* wheee... it's going...
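+	 * The master below polls cpu_callin_map[i] for up to 10000 * 200us
+	 * (about two seconds) before declaring the new CPU stuck.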
*/ + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(200); + } + printk(KERN_INFO "Started CPU %d\n", (unsigned int)i); + + if (!(cpu_callin_map[i])) { + printk(KERN_ERR "Processor %d is stuck.\n", i); + return -ENODEV; + } else { + leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); + leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); + leon_enable_irq_cpu(leon_ipi_irq, i); + } + + local_ops->cache_all(); + return 0; +} + +void __init leon_smp_done(void) +{ + + int i, first; + int *prev; + + /* setup cpu list for irq rotation */ + first = 0; + prev = &first; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) { + *prev = i; + prev = &cpu_data(i).next; + } + } + *prev = first; + local_ops->cache_all(); + + /* Free unneeded trap tables */ + if (!cpu_present(1)) { + free_reserved_page(virt_to_page(&trapbase_cpu1)); + } + if (!cpu_present(2)) { + free_reserved_page(virt_to_page(&trapbase_cpu2)); + } + if (!cpu_present(3)) { + free_reserved_page(virt_to_page(&trapbase_cpu3)); + } + /* Ok, they are spinning and ready to go. */ + smp_processors_ready = 1; + +} + +struct leon_ipi_work { + int single; + int msk; + int resched; +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work); + +/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ + * is used for all three types of IPIs. + */ +static void __init leon_ipi_init(void) +{ + int cpu, len; + struct leon_ipi_work *work; + struct property *pp; + struct device_node *rootnp; + struct tt_entry *trap_table; + unsigned long flags; + + /* Find IPI IRQ or stick with default value */ + rootnp = of_find_node_by_path("/ambapp0"); + if (rootnp) { + pp = of_find_property(rootnp, "ipi_num", &len); + if (pp && (*(int *)pp->value)) + leon_ipi_irq = *(int *)pp->value; + } + printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq); + + /* Adjust so that we jump directly to smpleon_ipi */ + local_irq_save(flags); + trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)]; + trap_table->inst_three += smpleon_ipi - real_irq_entry; + local_ops->cache_all(); + local_irq_restore(flags); + + for_each_possible_cpu(cpu) { + work = &per_cpu(leon_ipi_work, cpu); + work->single = work->msk = work->resched = 0; + } +} + +static void leon_send_ipi(int cpu, int level) +{ + unsigned long mask; + mask = leon_get_irqmask(level); + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask); +} + +static void leon_ipi_single(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->single = 1; + + /* Generate IRQ on the CPU */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +static void leon_ipi_mask_one(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->msk = 1; + + /* Generate IRQ on the CPU */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +static void leon_ipi_resched(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->resched = 1; + + /* Generate IRQ on the CPU (any IRQ will cause resched) */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +void leonsmp_ipi_interrupt(void) +{ + struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work); + + if (work->single) { + work->single = 0; + smp_call_function_single_interrupt(); + } + if (work->msk) { + work->msk = 0; + smp_call_function_interrupt(); + } + if (work->resched) { + work->resched = 0; + smp_resched_interrupt(); + } +} + +static struct smp_funcall { + void *func; + unsigned long arg1; + unsigned long arg2; + unsigned 
long arg3; + unsigned long arg4; + unsigned long arg5; + unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ + unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ +} ccall_info __attribute__((aligned(8))); + +static DEFINE_SPINLOCK(cross_call_lock); + +/* Cross calls must be serialized, at least currently. */ +static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + if (smp_processors_ready) { + register int high = NR_CPUS - 1; + unsigned long flags; + + spin_lock_irqsave(&cross_call_lock, flags); + + { + /* If you make changes here, make sure gcc generates proper code... */ + register void *f asm("i0") = func; + register unsigned long a1 asm("i1") = arg1; + register unsigned long a2 asm("i2") = arg2; + register unsigned long a3 asm("i3") = arg3; + register unsigned long a4 asm("i4") = arg4; + register unsigned long a5 asm("i5") = 0; + + __asm__ __volatile__("std %0, [%6]\n\t" + "std %2, [%6 + 8]\n\t" + "std %4, [%6 + 16]\n\t" : : + "r"(f), "r"(a1), "r"(a2), "r"(a3), + "r"(a4), "r"(a5), + "r"(&ccall_info.func)); + } + + /* Init receive/complete mapping, plus fire the IPI's off. */ + { + register int i; + + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); + for (i = 0; i <= high; i++) { + if (cpumask_test_cpu(i, &mask)) { + ccall_info.processors_in[i] = 0; + ccall_info.processors_out[i] = 0; + leon_send_ipi(i, LEON3_IRQ_CROSS_CALL); + + } + } + } + + { + register int i; + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + + while (!ccall_info.processors_in[i]) + barrier(); + } while (++i <= high); + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + + while (!ccall_info.processors_out[i]) + barrier(); + } while (++i <= high); + } + + spin_unlock_irqrestore(&cross_call_lock, flags); + } +} + +/* Running cross calls. */ +void leon_cross_call_irq(void) +{ + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; + int i = smp_processor_id(); + + ccall_info.processors_in[i] = 1; + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); + ccall_info.processors_out[i] = 1; +} + +static const struct sparc32_ipi_ops leon_ipi_ops = { + .cross_call = leon_cross_call, + .resched = leon_ipi_resched, + .single = leon_ipi_single, + .mask_one = leon_ipi_mask_one, +}; + +void __init leon_init_smp(void) +{ + /* Patch ipi15 trap table */ + t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m); + + sparc32_ipi_ops = &leon_ipi_ops; +} diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c new file mode 100644 index 000000000..30f171b7b --- /dev/null +++ b/arch/sparc/kernel/mdesc.c @@ -0,0 +1,1353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* mdesc.c: Sun4V machine description handling. + * + * Copyright (C) 2007, 2008 David S. Miller + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Unlike the OBP device tree, the machine description is a full-on + * DAG. An arbitrary number of ARCs are possible from one + * node to other nodes and thus we can't use the OBP device_node + * data structure to represent these nodes inside of the kernel. + * + * Actually, it isn't even a DAG, because there are back pointers + * which create cycles in the graph. 
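+ *
+ * Every element in the node block is 16 bytes (tag, name info and a
+ * 64-bit value or data descriptor), which is why the traversal helpers
+ * below compute the number of nodes as node_sz / 16.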
+ * + * mdesc_hdr and mdesc_elem describe the layout of the data structure + * we get from the Hypervisor. + */ +struct mdesc_hdr { + u32 version; /* Transport version */ + u32 node_sz; /* node block size */ + u32 name_sz; /* name block size */ + u32 data_sz; /* data block size */ + char data[]; +} __attribute__((aligned(16))); + +struct mdesc_elem { + u8 tag; +#define MD_LIST_END 0x00 +#define MD_NODE 0x4e +#define MD_NODE_END 0x45 +#define MD_NOOP 0x20 +#define MD_PROP_ARC 0x61 +#define MD_PROP_VAL 0x76 +#define MD_PROP_STR 0x73 +#define MD_PROP_DATA 0x64 + u8 name_len; + u16 resv; + u32 name_offset; + union { + struct { + u32 data_len; + u32 data_offset; + } data; + u64 val; + } d; +}; + +struct mdesc_mem_ops { + struct mdesc_handle *(*alloc)(unsigned int mdesc_size); + void (*free)(struct mdesc_handle *handle); +}; + +struct mdesc_handle { + struct list_head list; + struct mdesc_mem_ops *mops; + void *self_base; + refcount_t refcnt; + unsigned int handle_size; + struct mdesc_hdr mdesc; +}; + +typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64, + union md_node_info *); +typedef void (*mdesc_node_info_rel_f)(union md_node_info *); +typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *); + +struct md_node_ops { + char *name; + mdesc_node_info_get_f get_info; + mdesc_node_info_rel_f rel_info; + mdesc_node_match_f node_match; +}; + +static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info); +static void rel_vdev_port_node_info(union md_node_info *node_info); +static bool vdev_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info); + +static int get_ds_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info); +static void rel_ds_port_node_info(union md_node_info *node_info); +static bool ds_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info); + +/* supported node types which can be registered */ +static struct md_node_ops md_node_ops_table[] = { + {"virtual-device-port", get_vdev_port_node_info, + rel_vdev_port_node_info, vdev_port_node_match}, + {"domain-services-port", get_ds_port_node_info, + rel_ds_port_node_info, ds_port_node_match}, + {NULL, NULL, NULL, NULL} +}; + +static void mdesc_get_node_ops(const char *node_name, + mdesc_node_info_get_f *get_info_f, + mdesc_node_info_rel_f *rel_info_f, + mdesc_node_match_f *match_f) +{ + int i; + + if (get_info_f) + *get_info_f = NULL; + + if (rel_info_f) + *rel_info_f = NULL; + + if (match_f) + *match_f = NULL; + + if (!node_name) + return; + + for (i = 0; md_node_ops_table[i].name != NULL; i++) { + if (strcmp(md_node_ops_table[i].name, node_name) == 0) { + if (get_info_f) + *get_info_f = md_node_ops_table[i].get_info; + + if (rel_info_f) + *rel_info_f = md_node_ops_table[i].rel_info; + + if (match_f) + *match_f = md_node_ops_table[i].node_match; + + break; + } + } +} + +static void mdesc_handle_init(struct mdesc_handle *hp, + unsigned int handle_size, + void *base) +{ + BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1)); + + memset(hp, 0, handle_size); + INIT_LIST_HEAD(&hp->list); + hp->self_base = base; + refcount_set(&hp->refcnt, 1); + hp->handle_size = handle_size; +} + +static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size) +{ + unsigned int handle_size, alloc_size; + struct mdesc_handle *hp; + unsigned long paddr; + + handle_size = (sizeof(struct mdesc_handle) - + sizeof(struct mdesc_hdr) + + mdesc_size); + alloc_size = PAGE_ALIGN(handle_size); + + 
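+	/* This runs before the page allocator is up, so grab physical
+	 * memory from memblock and access it through the linear mapping.
+	 */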
paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE); + + hp = NULL; + if (paddr) { + hp = __va(paddr); + mdesc_handle_init(hp, handle_size, hp); + } + return hp; +} + +static void __init mdesc_memblock_free(struct mdesc_handle *hp) +{ + unsigned int alloc_size; + unsigned long start; + + BUG_ON(refcount_read(&hp->refcnt) != 0); + BUG_ON(!list_empty(&hp->list)); + + alloc_size = PAGE_ALIGN(hp->handle_size); + start = __pa(hp); + memblock_free_late(start, alloc_size); +} + +static struct mdesc_mem_ops memblock_mdesc_ops = { + .alloc = mdesc_memblock_alloc, + .free = mdesc_memblock_free, +}; + +static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) +{ + unsigned int handle_size; + struct mdesc_handle *hp; + unsigned long addr; + void *base; + + handle_size = (sizeof(struct mdesc_handle) - + sizeof(struct mdesc_hdr) + + mdesc_size); + base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!base) + return NULL; + + addr = (unsigned long)base; + addr = (addr + 15UL) & ~15UL; + hp = (struct mdesc_handle *) addr; + + mdesc_handle_init(hp, handle_size, base); + + return hp; +} + +static void mdesc_kfree(struct mdesc_handle *hp) +{ + BUG_ON(refcount_read(&hp->refcnt) != 0); + BUG_ON(!list_empty(&hp->list)); + + kfree(hp->self_base); +} + +static struct mdesc_mem_ops kmalloc_mdesc_memops = { + .alloc = mdesc_kmalloc, + .free = mdesc_kfree, +}; + +static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size, + struct mdesc_mem_ops *mops) +{ + struct mdesc_handle *hp = mops->alloc(mdesc_size); + + if (hp) + hp->mops = mops; + + return hp; +} + +static void mdesc_free(struct mdesc_handle *hp) +{ + hp->mops->free(hp); +} + +static struct mdesc_handle *cur_mdesc; +static LIST_HEAD(mdesc_zombie_list); +static DEFINE_SPINLOCK(mdesc_lock); + +struct mdesc_handle *mdesc_grab(void) +{ + struct mdesc_handle *hp; + unsigned long flags; + + spin_lock_irqsave(&mdesc_lock, flags); + hp = cur_mdesc; + if (hp) + refcount_inc(&hp->refcnt); + spin_unlock_irqrestore(&mdesc_lock, flags); + + return hp; +} +EXPORT_SYMBOL(mdesc_grab); + +void mdesc_release(struct mdesc_handle *hp) +{ + unsigned long flags; + + spin_lock_irqsave(&mdesc_lock, flags); + if (refcount_dec_and_test(&hp->refcnt)) { + list_del_init(&hp->list); + hp->mops->free(hp); + } + spin_unlock_irqrestore(&mdesc_lock, flags); +} +EXPORT_SYMBOL(mdesc_release); + +static DEFINE_MUTEX(mdesc_mutex); +static struct mdesc_notifier_client *client_list; + +void mdesc_register_notifier(struct mdesc_notifier_client *client) +{ + bool supported = false; + u64 node; + int i; + + mutex_lock(&mdesc_mutex); + + /* check to see if the node is supported for registration */ + for (i = 0; md_node_ops_table[i].name != NULL; i++) { + if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) { + supported = true; + break; + } + } + + if (!supported) { + pr_err("MD: %s node not supported\n", client->node_name); + mutex_unlock(&mdesc_mutex); + return; + } + + client->next = client_list; + client_list = client; + + mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name) + client->add(cur_mdesc, node, client->node_name); + + mutex_unlock(&mdesc_mutex); +} + +static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node) +{ + const u64 *id; + u64 a; + + id = NULL; + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { + u64 target; + + target = mdesc_arc_target(hp, a); + id = mdesc_get_property(hp, target, + "cfg-handle", NULL); + if (id) + break; + } + + return id; +} + +static int get_vdev_port_node_info(struct mdesc_handle *md, 
u64 node, + union md_node_info *node_info) +{ + const u64 *parent_cfg_hdlp; + const char *name; + const u64 *idp; + + /* + * Virtual device nodes are distinguished by: + * 1. "id" property + * 2. "name" property + * 3. parent node "cfg-handle" property + */ + idp = mdesc_get_property(md, node, "id", NULL); + name = mdesc_get_property(md, node, "name", NULL); + parent_cfg_hdlp = parent_cfg_handle(md, node); + + if (!idp || !name || !parent_cfg_hdlp) + return -1; + + node_info->vdev_port.id = *idp; + node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); + if (!node_info->vdev_port.name) + return -1; + node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; + + return 0; +} + +static void rel_vdev_port_node_info(union md_node_info *node_info) +{ + if (node_info && node_info->vdev_port.name) { + kfree_const(node_info->vdev_port.name); + node_info->vdev_port.name = NULL; + } +} + +static bool vdev_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info) +{ + if (a_node_info->vdev_port.id != b_node_info->vdev_port.id) + return false; + + if (a_node_info->vdev_port.parent_cfg_hdl != + b_node_info->vdev_port.parent_cfg_hdl) + return false; + + if (strncmp(a_node_info->vdev_port.name, + b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0) + return false; + + return true; +} + +static int get_ds_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info) +{ + const u64 *idp; + + /* DS port nodes use the "id" property to distinguish them */ + idp = mdesc_get_property(md, node, "id", NULL); + if (!idp) + return -1; + + node_info->ds_port.id = *idp; + + return 0; +} + +static void rel_ds_port_node_info(union md_node_info *node_info) +{ +} + +static bool ds_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info) +{ + if (a_node_info->ds_port.id != b_node_info->ds_port.id) + return false; + + return true; +} + +/* Run 'func' on nodes which are in A but not in B. 
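+ * This is the core of the add/remove diffing done on a machine
+ * description update: notify_one() calls it once with (old, new) to
+ * report removed nodes and once with (new, old) to report added ones.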
*/ +static void invoke_on_missing(const char *name, + struct mdesc_handle *a, + struct mdesc_handle *b, + void (*func)(struct mdesc_handle *, u64, + const char *node_name)) +{ + mdesc_node_info_get_f get_info_func; + mdesc_node_info_rel_f rel_info_func; + mdesc_node_match_f node_match_func; + union md_node_info a_node_info; + union md_node_info b_node_info; + bool found; + u64 a_node; + u64 b_node; + int rv; + + /* + * Find the get_info, rel_info and node_match ops for the given + * node name + */ + mdesc_get_node_ops(name, &get_info_func, &rel_info_func, + &node_match_func); + + /* If we didn't find a match, the node type is not supported */ + if (!get_info_func || !rel_info_func || !node_match_func) { + pr_err("MD: %s node type is not supported\n", name); + return; + } + + mdesc_for_each_node_by_name(a, a_node, name) { + found = false; + + rv = get_info_func(a, a_node, &a_node_info); + if (rv != 0) { + pr_err("MD: Cannot find 1 or more required match properties for %s node.\n", + name); + continue; + } + + /* Check each node in B for node matching a_node */ + mdesc_for_each_node_by_name(b, b_node, name) { + rv = get_info_func(b, b_node, &b_node_info); + if (rv != 0) + continue; + + if (node_match_func(&a_node_info, &b_node_info)) { + found = true; + rel_info_func(&b_node_info); + break; + } + + rel_info_func(&b_node_info); + } + + rel_info_func(&a_node_info); + + if (!found) + func(a, a_node, name); + } +} + +static void notify_one(struct mdesc_notifier_client *p, + struct mdesc_handle *old_hp, + struct mdesc_handle *new_hp) +{ + invoke_on_missing(p->node_name, old_hp, new_hp, p->remove); + invoke_on_missing(p->node_name, new_hp, old_hp, p->add); +} + +static void mdesc_notify_clients(struct mdesc_handle *old_hp, + struct mdesc_handle *new_hp) +{ + struct mdesc_notifier_client *p = client_list; + + while (p) { + notify_one(p, old_hp, new_hp); + p = p->next; + } +} + +void mdesc_update(void) +{ + unsigned long len, real_len, status; + struct mdesc_handle *hp, *orig_hp; + unsigned long flags; + + mutex_lock(&mdesc_mutex); + + (void) sun4v_mach_desc(0UL, 0UL, &len); + + hp = mdesc_alloc(len, &kmalloc_mdesc_memops); + if (!hp) { + printk(KERN_ERR "MD: mdesc alloc fails\n"); + goto out; + } + + status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); + if (status != HV_EOK || real_len > len) { + printk(KERN_ERR "MD: mdesc reread fails with %lu\n", + status); + refcount_dec(&hp->refcnt); + mdesc_free(hp); + goto out; + } + + spin_lock_irqsave(&mdesc_lock, flags); + orig_hp = cur_mdesc; + cur_mdesc = hp; + spin_unlock_irqrestore(&mdesc_lock, flags); + + mdesc_notify_clients(orig_hp, hp); + + spin_lock_irqsave(&mdesc_lock, flags); + if (refcount_dec_and_test(&orig_hp->refcnt)) + mdesc_free(orig_hp); + else + list_add(&orig_hp->list, &mdesc_zombie_list); + spin_unlock_irqrestore(&mdesc_lock, flags); + +out: + mutex_unlock(&mdesc_mutex); +} + +u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name, + union md_node_info *node_info) +{ + mdesc_node_info_get_f get_info_func; + mdesc_node_info_rel_f rel_info_func; + mdesc_node_match_f node_match_func; + union md_node_info hp_node_info; + u64 hp_node; + int rv; + + if (hp == NULL || node_name == NULL || node_info == NULL) + return MDESC_NODE_NULL; + + /* Find the ops for the given node name */ + mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func, + &node_match_func); + + /* If we didn't find ops for the given node name, it is not supported */ + if (!get_info_func || !rel_info_func || !node_match_func) { + pr_err("MD: %s node is 
not supported\n", node_name); + return -EINVAL; + } + + mdesc_for_each_node_by_name(hp, hp_node, node_name) { + rv = get_info_func(hp, hp_node, &hp_node_info); + if (rv != 0) + continue; + + if (node_match_func(node_info, &hp_node_info)) + break; + + rel_info_func(&hp_node_info); + } + + rel_info_func(&hp_node_info); + + return hp_node; +} +EXPORT_SYMBOL(mdesc_get_node); + +int mdesc_get_node_info(struct mdesc_handle *hp, u64 node, + const char *node_name, union md_node_info *node_info) +{ + mdesc_node_info_get_f get_info_func; + int rv; + + if (hp == NULL || node == MDESC_NODE_NULL || + node_name == NULL || node_info == NULL) + return -EINVAL; + + /* Find the get_info op for the given node name */ + mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL); + + /* If we didn't find a get_info_func, the node name is not supported */ + if (get_info_func == NULL) { + pr_err("MD: %s node is not supported\n", node_name); + return -EINVAL; + } + + rv = get_info_func(hp, node, node_info); + if (rv != 0) { + pr_err("MD: Cannot find 1 or more required match properties for %s node.\n", + node_name); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(mdesc_get_node_info); + +static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) +{ + return (struct mdesc_elem *) mdesc->data; +} + +static void *name_block(struct mdesc_hdr *mdesc) +{ + return ((void *) node_block(mdesc)) + mdesc->node_sz; +} + +static void *data_block(struct mdesc_hdr *mdesc) +{ + return ((void *) name_block(mdesc)) + mdesc->name_sz; +} + +u64 mdesc_node_by_name(struct mdesc_handle *hp, + u64 from_node, const char *name) +{ + struct mdesc_elem *ep = node_block(&hp->mdesc); + const char *names = name_block(&hp->mdesc); + u64 last_node = hp->mdesc.node_sz / 16; + u64 ret; + + if (from_node == MDESC_NODE_NULL) { + ret = from_node = 0; + } else if (from_node >= last_node) { + return MDESC_NODE_NULL; + } else { + ret = ep[from_node].d.val; + } + + while (ret < last_node) { + if (ep[ret].tag != MD_NODE) + return MDESC_NODE_NULL; + if (!strcmp(names + ep[ret].name_offset, name)) + break; + ret = ep[ret].d.val; + } + if (ret >= last_node) + ret = MDESC_NODE_NULL; + return ret; +} +EXPORT_SYMBOL(mdesc_node_by_name); + +const void *mdesc_get_property(struct mdesc_handle *hp, u64 node, + const char *name, int *lenp) +{ + const char *names = name_block(&hp->mdesc); + u64 last_node = hp->mdesc.node_sz / 16; + void *data = data_block(&hp->mdesc); + struct mdesc_elem *ep; + + if (node == MDESC_NODE_NULL || node >= last_node) + return NULL; + + ep = node_block(&hp->mdesc) + node; + ep++; + for (; ep->tag != MD_NODE_END; ep++) { + void *val = NULL; + int len = 0; + + switch (ep->tag) { + case MD_PROP_VAL: + val = &ep->d.val; + len = 8; + break; + + case MD_PROP_STR: + case MD_PROP_DATA: + val = data + ep->d.data.data_offset; + len = ep->d.data.data_len; + break; + + default: + break; + } + if (!val) + continue; + + if (!strcmp(names + ep->name_offset, name)) { + if (lenp) + *lenp = len; + return val; + } + } + + return NULL; +} +EXPORT_SYMBOL(mdesc_get_property); + +u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type) +{ + struct mdesc_elem *ep, *base = node_block(&hp->mdesc); + const char *names = name_block(&hp->mdesc); + u64 last_node = hp->mdesc.node_sz / 16; + + if (from == MDESC_NODE_NULL || from >= last_node) + return MDESC_NODE_NULL; + + ep = base + from; + + ep++; + for (; ep->tag != MD_NODE_END; ep++) { + if (ep->tag != MD_PROP_ARC) + continue; + + if (strcmp(names + ep->name_offset, arc_type)) + continue; + + 
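+		/* Found an arc of the requested type; its element index
+		 * doubles as the arc handle returned to the caller.
+		 */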
return ep - base; + } + + return MDESC_NODE_NULL; +} +EXPORT_SYMBOL(mdesc_next_arc); + +u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc) +{ + struct mdesc_elem *ep, *base = node_block(&hp->mdesc); + + ep = base + arc; + + return ep->d.val; +} +EXPORT_SYMBOL(mdesc_arc_target); + +const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) +{ + struct mdesc_elem *ep, *base = node_block(&hp->mdesc); + const char *names = name_block(&hp->mdesc); + u64 last_node = hp->mdesc.node_sz / 16; + + if (node == MDESC_NODE_NULL || node >= last_node) + return NULL; + + ep = base + node; + if (ep->tag != MD_NODE) + return NULL; + + return names + ep->name_offset; +} +EXPORT_SYMBOL(mdesc_node_name); + +static u64 max_cpus = 64; + +static void __init report_platform_properties(void) +{ + struct mdesc_handle *hp = mdesc_grab(); + u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); + const char *s; + const u64 *v; + + if (pn == MDESC_NODE_NULL) { + prom_printf("No platform node in machine-description.\n"); + prom_halt(); + } + + s = mdesc_get_property(hp, pn, "banner-name", NULL); + printk("PLATFORM: banner-name [%s]\n", s); + s = mdesc_get_property(hp, pn, "name", NULL); + printk("PLATFORM: name [%s]\n", s); + + v = mdesc_get_property(hp, pn, "hostid", NULL); + if (v) + printk("PLATFORM: hostid [%08llx]\n", *v); + v = mdesc_get_property(hp, pn, "serial#", NULL); + if (v) + printk("PLATFORM: serial# [%08llx]\n", *v); + v = mdesc_get_property(hp, pn, "stick-frequency", NULL); + printk("PLATFORM: stick-frequency [%08llx]\n", *v); + v = mdesc_get_property(hp, pn, "mac-address", NULL); + if (v) + printk("PLATFORM: mac-address [%llx]\n", *v); + v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL); + if (v) + printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v); + v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL); + if (v) + printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v); + v = mdesc_get_property(hp, pn, "max-cpus", NULL); + if (v) { + max_cpus = *v; + printk("PLATFORM: max-cpus [%llu]\n", max_cpus); + } + +#ifdef CONFIG_SMP + { + int max_cpu, i; + + if (v) { + max_cpu = *v; + if (max_cpu > NR_CPUS) + max_cpu = NR_CPUS; + } else { + max_cpu = NR_CPUS; + } + for (i = 0; i < max_cpu; i++) + set_cpu_possible(i, true); + } +#endif + + mdesc_release(hp); +} + +static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) +{ + const u64 *level = mdesc_get_property(hp, mp, "level", NULL); + const u64 *size = mdesc_get_property(hp, mp, "size", NULL); + const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL); + const char *type; + int type_len; + + type = mdesc_get_property(hp, mp, "type", &type_len); + + switch (*level) { + case 1: + if (of_find_in_proplist(type, "instn", type_len)) { + c->icache_size = *size; + c->icache_line_size = *line_size; + } else if (of_find_in_proplist(type, "data", type_len)) { + c->dcache_size = *size; + c->dcache_line_size = *line_size; + } + break; + + case 2: + c->ecache_size = *size; + c->ecache_line_size = *line_size; + break; + + default: + break; + } + + if (*level == 1) { + u64 a; + + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + u64 target = mdesc_arc_target(hp, a); + const char *name = mdesc_node_name(hp, target); + + if (!strcmp(name, "cache")) + fill_in_one_cache(c, hp, target); + } + } +} + +static void find_back_node_value(struct mdesc_handle *hp, u64 node, + char *srch_val, + void (*func)(struct mdesc_handle *, u64, int), + u64 val, int depth) +{ + u64 arc; + + /* Since we have an estimate of 
recursion depth, do a sanity check. */
+	if (depth == 0)
+		return;
+
+	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 n = mdesc_arc_target(hp, arc);
+		const char *name = mdesc_node_name(hp, n);
+
+		if (!strcmp(srch_val, name))
+			(*func)(hp, n, val);
+
+		find_back_node_value(hp, n, srch_val, func, val, depth-1);
+	}
+}
+
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+			   int core_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus())
+		cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+				int max_cache_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus()) {
+		cpu_data(*id).max_cache_id = max_cache_id;
+
+		/* On systems without explicit socket descriptions, the
+		 * socket is max_cache_id.
+		 */
+		cpu_data(*id).sock_id = max_cache_id;
+	}
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+			  int core_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+			       int max_cache_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+			     max_cache_id, 10);
+}
+
+static void set_core_ids(struct mdesc_handle *hp)
+{
+	int idx;
+	u64 mp;
+
+	idx = 1;
+
+	/* Identify unique cores by looking for cpus backpointed to by
+	 * level 1 instruction caches.
+	 */
+	mdesc_for_each_node_by_name(hp, mp, "cache") {
+		const u64 *level;
+		const char *type;
+		int len;
+
+		level = mdesc_get_property(hp, mp, "level", NULL);
+		if (*level != 1)
+			continue;
+
+		type = mdesc_get_property(hp, mp, "type", &len);
+		if (!of_find_in_proplist(type, "instn", len))
+			continue;
+
+		mark_core_ids(hp, mp, idx);
+		idx++;
+	}
+}
+
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+	u64 mp;
+	int idx = 1;
+	int fnd = 0;
+
+	/* Identify the unique highest level of shared cache by looking
+	 * for cpus backpointed to by shared level N caches.
+	 */
+	mdesc_for_each_node_by_name(hp, mp, "cache") {
+		const u64 *cur_lvl;
+
+		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+		if (*cur_lvl != level)
+			continue;
+		mark_max_cache_ids(hp, mp, idx);
+		idx++;
+		fnd = 1;
+	}
+	return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+	int idx = 1;
+
+	mdesc_for_each_node_by_name(hp, mp, "socket") {
+		u64 a;
+
+		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+			u64 t = mdesc_arc_target(hp, a);
+			const char *name;
+			const u64 *id;
+
+			name = mdesc_node_name(hp, t);
+			if (strcmp(name, "cpu"))
+				continue;
+
+			id = mdesc_get_property(hp, t, "id", NULL);
+			if (*id < num_possible_cpus())
+				cpu_data(*id).sock_id = idx;
+		}
+		idx++;
+	}
+}
+
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+	u64 mp;
+
+	/* Find the highest level of shared cache, which pre-T7 is also
+	 * the socket.
+	 */
+	if (!set_max_cache_ids_by_cache(hp, 3))
+		set_max_cache_ids_by_cache(hp, 2);
+
+	/* If the machine description exposes socket data, use it. */
+	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+	if (mp != MDESC_NODE_NULL)
+		set_sock_ids_by_socket(hp, mp);
+}
+
+static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
+{
+	u64 a;
+
+	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
+		u64 t = mdesc_arc_target(hp, a);
+		const char *name;
+		const u64 *id;
+
+		name = mdesc_node_name(hp, t);
+		if (strcmp(name, "cpu"))
+			continue;
+
+		id = mdesc_get_property(hp, t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
+{
+	int idx;
+	u64 mp;
+
+	idx = 0;
+	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = mdesc_get_property(hp, mp, "type", &len);
+		if (!of_find_in_proplist(type, "int", len) &&
+		    !of_find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(hp, mp, idx);
+		idx++;
+	}
+}
+
+static void set_proc_ids(struct mdesc_handle *hp)
+{
+	__set_proc_ids(hp, "exec_unit");
+	__set_proc_ids(hp, "exec-unit");
+}
+
+static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
+			       unsigned long def, unsigned long max)
+{
+	u64 val;
+
+	if (!p)
+		goto use_default;
+	val = *p;
+
+	if (!val || val >= 64)
+		goto use_default;
+
+	if (val > max)
+		val = max;
+
+	*mask = ((1U << val) * 64U) - 1U;
+	return;
+
+use_default:
+	*mask = ((1U << def) * 64U) - 1U;
+}
+
+static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
+			   struct trap_per_cpu *tb)
+{
+	static int printed;
+	const u64 *val;
+
+	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
+	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));
+
+	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
+	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);
+
+	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
+	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);
+
+	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
+	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
+	if (!printed++) {
+		pr_info("SUN4V: Mondo queue sizes "
+			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
+			tb->cpu_mondo_qmask + 1,
+			tb->dev_mondo_qmask + 1,
+			tb->resum_qmask + 1,
+			tb->nonresum_qmask + 1);
+	}
+}
+
+static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
+{
+	struct mdesc_handle *hp = mdesc_grab();
+	void *ret = NULL;
+	u64 mp;
+
+	mdesc_for_each_node_by_name(hp, mp, "cpu") {
+		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
+		int cpuid = *id;
+
+#ifdef CONFIG_SMP
+		if (cpuid >= NR_CPUS) {
+			printk(KERN_WARNING "Ignoring CPU %d which is "
+			       ">= NR_CPUS (%d)\n",
+			       cpuid, NR_CPUS);
+			continue;
+		}
+		if (!cpumask_test_cpu(cpuid, mask))
+			continue;
+#endif
+
+		ret = func(hp, mp, cpuid, arg);
+		if (ret)
+			goto out;
+	}
+out:
+	mdesc_release(hp);
+	return ret;
+}
+
+static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
+			    void *arg)
+{
+	ncpus_probed++;
+#ifdef CONFIG_SMP
+	set_cpu_present(cpuid, true);
+#endif
+	return NULL;
+}
+
+void mdesc_populate_present_mask(cpumask_t *mask)
+{
+	if (tlb_type != hypervisor)
+		return;
+
+	ncpus_probed = 0;
+	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
+}
+
+static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+	const u64 *pgsz_prop = mdesc_get_property(hp, mp,
"mmu-page-size-list", NULL); + unsigned long *pgsz_mask = arg; + u64 val; + + val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | + HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); + if (pgsz_prop) + val = *pgsz_prop; + + if (!*pgsz_mask) + *pgsz_mask = val; + else + *pgsz_mask &= val; + return NULL; +} + +void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask) +{ + *pgsz_mask = 0; + mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask); +} + +static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, + void *arg) +{ + const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); + struct trap_per_cpu *tb; + cpuinfo_sparc *c; + u64 a; + +#ifndef CONFIG_SMP + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. + */ + if (cpuid != real_hard_smp_processor_id()) + return NULL; + cpuid = 0; +#endif + + c = &cpu_data(cpuid); + c->clock_tick = *cfreq; + + tb = &trap_block[cpuid]; + get_mondo_data(hp, mp, tb); + + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + u64 j, t = mdesc_arc_target(hp, a); + const char *t_name; + + t_name = mdesc_node_name(hp, t); + if (!strcmp(t_name, "cache")) { + fill_in_one_cache(c, hp, t); + continue; + } + + mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { + u64 n = mdesc_arc_target(hp, j); + const char *n_name; + + n_name = mdesc_node_name(hp, n); + if (!strcmp(n_name, "cache")) + fill_in_one_cache(c, hp, n); + } + } + + c->core_id = 0; + c->proc_id = -1; + + return NULL; +} + +void mdesc_fill_in_cpu_data(cpumask_t *mask) +{ + struct mdesc_handle *hp; + + mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask); + + hp = mdesc_grab(); + + set_core_ids(hp); + set_proc_ids(hp); + set_sock_ids(hp); + + mdesc_release(hp); + + smp_fill_in_sib_core_maps(); +} + +/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is + * opened. Hold this reference until /dev/mdesc is closed to ensure + * mdesc data structure is not released underneath us. Store the + * pointer to mdesc structure in private_data for read and seek to use + */ +static int mdesc_open(struct inode *inode, struct file *file) +{ + struct mdesc_handle *hp = mdesc_grab(); + + if (!hp) + return -ENODEV; + + file->private_data = hp; + + return 0; +} + +static ssize_t mdesc_read(struct file *file, char __user *buf, + size_t len, loff_t *offp) +{ + struct mdesc_handle *hp = file->private_data; + unsigned char *mdesc; + int bytes_left, count = len; + + if (*offp >= hp->handle_size) + return 0; + + bytes_left = hp->handle_size - *offp; + if (count > bytes_left) + count = bytes_left; + + mdesc = (unsigned char *)&hp->mdesc; + mdesc += *offp; + if (!copy_to_user(buf, mdesc, count)) { + *offp += count; + return count; + } else { + return -EFAULT; + } +} + +static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence) +{ + struct mdesc_handle *hp = file->private_data; + + return no_seek_end_llseek_size(file, offset, whence, hp->handle_size); +} + +/* mdesc_close() - /dev/mdesc is being closed, release the reference to + * mdesc structure. 
+ */ +static int mdesc_close(struct inode *inode, struct file *file) +{ + mdesc_release(file->private_data); + return 0; +} + +static const struct file_operations mdesc_fops = { + .open = mdesc_open, + .read = mdesc_read, + .llseek = mdesc_llseek, + .release = mdesc_close, + .owner = THIS_MODULE, +}; + +static struct miscdevice mdesc_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "mdesc", + .fops = &mdesc_fops, +}; + +static int __init mdesc_misc_init(void) +{ + return misc_register(&mdesc_misc); +} + +__initcall(mdesc_misc_init); + +void __init sun4v_mdesc_init(void) +{ + struct mdesc_handle *hp; + unsigned long len, real_len, status; + + (void) sun4v_mach_desc(0UL, 0UL, &len); + + printk("MDESC: Size is %lu bytes.\n", len); + + hp = mdesc_alloc(len, &memblock_mdesc_ops); + if (hp == NULL) { + prom_printf("MDESC: alloc of %lu bytes failed.\n", len); + prom_halt(); + } + + status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); + if (status != HV_EOK || real_len > len) { + prom_printf("sun4v_mach_desc fails, err(%lu), " + "len(%lu), real_len(%lu)\n", + status, len, real_len); + mdesc_free(hp); + prom_halt(); + } + + cur_mdesc = hp; + + mdesc_adi_init(); + report_platform_properties(); +} diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S new file mode 100644 index 000000000..b5c841775 --- /dev/null +++ b/arch/sparc/kernel/misctrap.S @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef CONFIG_KGDB + .globl arch_kgdb_breakpoint + .type arch_kgdb_breakpoint,#function +arch_kgdb_breakpoint: + ta 0x72 + retl + nop + .size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint +#endif + + .type __do_privact,#function +__do_privact: + mov TLB_SFSR, %g3 + stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit + membar #Sync + sethi %hi(109f), %g7 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + call do_privact + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __do_privact,.-__do_privact + + .type do_mna,#function +do_mna: + rdpr %tl, %g3 + cmp %g3, 1 + + /* Setup %g4/%g5 now as they are used in the + * winfixup code. + */ + mov TLB_SFSR, %g3 + mov DMMU_SFAR, %g4 + ldxa [%g4] ASI_DMMU, %g4 + ldxa [%g3] ASI_DMMU, %g5 + stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit + membar #Sync + bgu,pn %icc, winfix_mna + rdpr %tpc, %g3 + +1: sethi %hi(109f), %g7 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size do_mna,.-do_mna + + .type do_lddfmna,#function +do_lddfmna: + sethi %hi(109f), %g7 + mov TLB_SFSR, %g4 + ldxa [%g4] ASI_DMMU, %g5 + stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit + membar #Sync + mov DMMU_SFAR, %g4 + ldxa [%g4] ASI_DMMU, %g4 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_lddfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size do_lddfmna,.-do_lddfmna + + .type do_stdfmna,#function +do_stdfmna: + sethi %hi(109f), %g7 + mov TLB_SFSR, %g4 + ldxa [%g4] ASI_DMMU, %g5 + stxa %g0, [%g4] ASI_DMMU ! 
Clear FaultValid bit + membar #Sync + mov DMMU_SFAR, %g4 + ldxa [%g4] ASI_DMMU, %g4 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_stdfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + nop + .size do_stdfmna,.-do_stdfmna + + .type breakpoint_trap,#function +breakpoint_trap: + call sparc_breakpoint + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + .size breakpoint_trap,.-breakpoint_trap diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c new file mode 100644 index 000000000..66c45a276 --- /dev/null +++ b/arch/sparc/kernel/module.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Kernel module help for sparc64. + * + * Copyright (C) 2001 Rusty Russell. + * Copyright (C) 2002 David S. Miller. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "entry.h" + +#ifdef CONFIG_SPARC64 + +#include + +static void *module_map(unsigned long size) +{ + if (PAGE_ALIGN(size) > MODULES_LEN) + return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} +#else +static void *module_map(unsigned long size) +{ + return vmalloc(size); +} +#endif /* CONFIG_SPARC64 */ + +void *module_alloc(unsigned long size) +{ + void *ret; + + ret = module_map(size); + if (ret) + memset(ret, 0, size); + + return ret; +} + +/* Make generic code ignore STT_REGISTER dummy undefined symbols. */ +int module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod) +{ + unsigned int symidx; + Elf_Sym *sym; + char *strtab; + int i; + + for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) { + if (symidx == hdr->e_shnum-1) { + printk("%s: no symtab found.\n", mod->name); + return -ENOEXEC; + } + } + sym = (Elf_Sym *)sechdrs[symidx].sh_addr; + strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr; + + for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) { + if (sym[i].st_shndx == SHN_UNDEF) { + if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) + sym[i].st_shndx = SHN_ABS; + } + } + return 0; +} + +int apply_relocate_add(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf_Sym *sym; + u8 *location; + u32 *loc32; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + Elf_Addr v; + + /* This is where to make the change */ + location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + loc32 = (u32 *) location; + +#ifdef CONFIG_SPARC64 + BUG_ON(((u64)location >> (u64)32) != (u64)0); +#endif /* CONFIG_SPARC64 */ + + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. 
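+	   As a worked example of the arithmetic below (a sketch, not
+	   tied to any particular module): for an R_SPARC_WDISP30 call
+	   site the value is
+	   v = sym->st_value + rel[i].r_addend - (Elf_Addr) location,
+	   and (v >> 2) lands in the low 30 bits of the instruction.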
*/ + sym = (Elf_Sym *)sechdrs[symindex].sh_addr + + ELF_R_SYM(rel[i].r_info); + v = sym->st_value + rel[i].r_addend; + + switch (ELF_R_TYPE(rel[i].r_info) & 0xff) { + case R_SPARC_DISP32: + v -= (Elf_Addr) location; + *loc32 = v; + break; +#ifdef CONFIG_SPARC64 + case R_SPARC_64: + location[0] = v >> 56; + location[1] = v >> 48; + location[2] = v >> 40; + location[3] = v >> 32; + location[4] = v >> 24; + location[5] = v >> 16; + location[6] = v >> 8; + location[7] = v >> 0; + break; + + case R_SPARC_WDISP19: + v -= (Elf_Addr) location; + *loc32 = (*loc32 & ~0x7ffff) | + ((v >> 2) & 0x7ffff); + break; + + case R_SPARC_OLO10: + *loc32 = (*loc32 & ~0x1fff) | + (((v & 0x3ff) + + (ELF_R_TYPE(rel[i].r_info) >> 8)) + & 0x1fff); + break; +#endif /* CONFIG_SPARC64 */ + + case R_SPARC_32: + case R_SPARC_UA32: + location[0] = v >> 24; + location[1] = v >> 16; + location[2] = v >> 8; + location[3] = v >> 0; + break; + + case R_SPARC_WDISP30: + v -= (Elf_Addr) location; + *loc32 = (*loc32 & ~0x3fffffff) | + ((v >> 2) & 0x3fffffff); + break; + + case R_SPARC_WDISP22: + v -= (Elf_Addr) location; + *loc32 = (*loc32 & ~0x3fffff) | + ((v >> 2) & 0x3fffff); + break; + + case R_SPARC_LO10: + *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff); + break; + + case R_SPARC_HI22: + *loc32 = (*loc32 & ~0x3fffff) | + ((v >> 10) & 0x3fffff); + break; + + default: + printk(KERN_ERR "module %s: Unknown relocation: %x\n", + me->name, + (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); + return -ENOEXEC; + } + } + return 0; +} + +#ifdef CONFIG_SPARC64 +static void do_patch_sections(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs) +{ + const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name)) + sun4v_1insn = s; + if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name)) + sun4v_2insn = s; + } + + if (sun4v_1insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_1insn->sh_addr; + sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size); + } + if (sun4v_2insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_2insn->sh_addr; + sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size); + } +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + do_patch_sections(hdr, sechdrs); + + /* Cheetah's I-cache is fully coherent. */ + if (tlb_type == spitfire) { + unsigned long va; + + flushw_all(); + for (va = 0; va < (PAGE_SIZE << 1); va += 32) + spitfire_put_icache_tag(va, 0x0); + __asm__ __volatile__("flush %g6"); + } + + return 0; +} +#endif /* CONFIG_SPARC64 */ diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c new file mode 100644 index 000000000..060fff95a --- /dev/null +++ b/arch/sparc/kernel/nmi.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Pseudo NMI support on sparc64 systems. + * + * Copyright (C) 2009 David S. Miller + * + * The NMI watchdog support and infrastructure is based almost + * entirely upon the x86 NMI support code. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "kstack.h" + +/* We don't have a real NMI on sparc64, but we can fake one + * up using profiling counter overflow interrupts and interrupt + * levels. + * + * The profile overflow interrupts at level 15, so we use + * level 14 as our IRQ off level. 
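+ *
+ * A worked example of the detection math below: the local timer keeps
+ * irq0_irqs ticking, the overflow interrupt re-fires roughly nmi_hz
+ * times a second, and if that sum is unchanged for 30 * nmi_hz
+ * consecutive overflows (about 30 seconds) the CPU is declared
+ * locked up.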
+ */ + +static int panic_on_timeout; + +/* nmi_active: + * >0: the NMI watchdog is active, but can be disabled + * <0: the NMI watchdog has not been set up, and cannot be enabled + * 0: the NMI watchdog is disabled, but can be enabled + */ +atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ +EXPORT_SYMBOL(nmi_active); +static int nmi_init_done; +static unsigned int nmi_hz = HZ; +static DEFINE_PER_CPU(short, wd_enabled); +static int endflag __initdata; + +static DEFINE_PER_CPU(unsigned int, last_irq_sum); +static DEFINE_PER_CPU(long, alert_counter); +static DEFINE_PER_CPU(int, nmi_touch); + +void arch_touch_nmi_watchdog(void) +{ + if (atomic_read(&nmi_active)) { + int cpu; + + for_each_present_cpu(cpu) { + if (per_cpu(nmi_touch, cpu) != 1) + per_cpu(nmi_touch, cpu) = 1; + } + } +} +EXPORT_SYMBOL(arch_touch_nmi_watchdog); + +static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) +{ + int this_cpu = smp_processor_id(); + + if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, + pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) + return; + + if (do_panic || panic_on_oops) + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + else + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); +} + +notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) +{ + unsigned int sum, touched = 0; + void *orig_sp; + + clear_softint(1 << irq); + + local_cpu_data().__nmi_count++; + + nmi_enter(); + + orig_sp = set_hardirq_stack(); + + if (notify_die(DIE_NMI, "nmi", regs, 0, + pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) + touched = 1; + else + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + + sum = local_cpu_data().irq0_irqs; + if (__this_cpu_read(nmi_touch)) { + __this_cpu_write(nmi_touch, 0); + touched = 1; + } + if (!touched && __this_cpu_read(last_irq_sum) == sum) { + __this_cpu_inc(alert_counter); + if (__this_cpu_read(alert_counter) == 30 * nmi_hz) + die_nmi("BUG: NMI Watchdog detected LOCKUP", + regs, panic_on_timeout); + } else { + __this_cpu_write(last_irq_sum, sum); + __this_cpu_write(alert_counter, 0); + } + if (__this_cpu_read(wd_enabled)) { + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); + } + + restore_hardirq_stack(orig_sp); + + nmi_exit(); +} + +static inline unsigned int get_nmi_count(int cpu) +{ + return cpu_data(cpu).__nmi_count; +} + +static __init void nmi_cpu_busy(void *data) +{ + while (endflag == 0) + mb(); +} + +static void report_broken_nmi(int cpu, int *prev_nmi_count) +{ + printk(KERN_CONT "\n"); + + printk(KERN_WARNING + "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", + cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); + + printk(KERN_WARNING + "Please report this to bugzilla.kernel.org,\n"); + printk(KERN_WARNING + "and attach the output of the 'dmesg' command.\n"); + + per_cpu(wd_enabled, cpu) = 0; + atomic_dec(&nmi_active); +} + +void stop_nmi_watchdog(void *unused) +{ + if (!__this_cpu_read(wd_enabled)) + return; + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + __this_cpu_write(wd_enabled, 0); + atomic_dec(&nmi_active); +} + +static int __init check_nmi_watchdog(void) +{ + unsigned int *prev_nmi_count; + int cpu, err; + + if (!atomic_read(&nmi_active)) + return 0; + + prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int), + GFP_KERNEL); + if (!prev_nmi_count) { + err = -ENOMEM; + goto error; + } + + printk(KERN_INFO "Testing NMI watchdog ... 
"); + + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); + + for_each_possible_cpu(cpu) + prev_nmi_count[cpu] = get_nmi_count(cpu); + local_irq_enable(); + mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ + + for_each_online_cpu(cpu) { + if (!per_cpu(wd_enabled, cpu)) + continue; + if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) + report_broken_nmi(cpu, prev_nmi_count); + } + endflag = 1; + if (!atomic_read(&nmi_active)) { + kfree(prev_nmi_count); + atomic_set(&nmi_active, -1); + err = -ENODEV; + goto error; + } + printk("OK.\n"); + + nmi_hz = 1; + + kfree(prev_nmi_count); + return 0; +error: + on_each_cpu(stop_nmi_watchdog, NULL, 1); + return err; +} + +void start_nmi_watchdog(void *unused) +{ + if (__this_cpu_read(wd_enabled)) + return; + + __this_cpu_write(wd_enabled, 1); + atomic_inc(&nmi_active); + + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); + + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); +} + +static void nmi_adjust_hz_one(void *unused) +{ + if (!__this_cpu_read(wd_enabled)) + return; + + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); + + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); +} + +void nmi_adjust_hz(unsigned int new_hz) +{ + nmi_hz = new_hz; + on_each_cpu(nmi_adjust_hz_one, NULL, 1); +} +EXPORT_SYMBOL_GPL(nmi_adjust_hz); + +static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) +{ + on_each_cpu(stop_nmi_watchdog, NULL, 1); + return 0; +} + +static struct notifier_block nmi_reboot_notifier = { + .notifier_call = nmi_shutdown, +}; + +int __init nmi_init(void) +{ + int err; + + on_each_cpu(start_nmi_watchdog, NULL, 1); + + err = check_nmi_watchdog(); + if (!err) { + err = register_reboot_notifier(&nmi_reboot_notifier); + if (err) { + on_each_cpu(stop_nmi_watchdog, NULL, 1); + atomic_set(&nmi_active, -1); + } + } + + nmi_init_done = 1; + + return err; +} + +static int __init setup_nmi_watchdog(char *str) +{ + if (!strncmp(str, "panic", 5)) + panic_on_timeout = 1; + + return 0; +} +__setup("nmi_watchdog=", setup_nmi_watchdog); + +/* + * sparc specific NMI watchdog enable function. + * Enables watchdog if it is not enabled already. + */ +int watchdog_nmi_enable(unsigned int cpu) +{ + if (atomic_read(&nmi_active) == -1) { + pr_warn("NMI watchdog cannot be enabled or disabled\n"); + return -1; + } + + /* + * watchdog thread could start even before nmi_init is called. + * Just Return in that case. Let nmi_init finish the init + * process first. + */ + if (!nmi_init_done) + return 0; + + smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1); + + return 0; +} +/* + * sparc specific NMI watchdog disable function. + * Disables watchdog if it is not disabled already. 
+ */ +void watchdog_nmi_disable(unsigned int cpu) +{ + if (atomic_read(&nmi_active) == -1) + pr_warn_once("NMI watchdog cannot be enabled or disabled\n"); + else + smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1); +} diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c new file mode 100644 index 000000000..4ebf51e6e --- /dev/null +++ b/arch/sparc/kernel/of_device_32.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "of_device_common.h" +#include "irq.h" + +/* + * PCI bus specific translator + */ + +static int of_bus_pci_match(struct device_node *np) +{ + if (of_node_is_type(np, "pci") || of_node_is_type(np, "pciex")) { + /* Do not do PCI specific frobbing if the + * PCI bridge lacks a ranges property. We + * want to pass it through up to the next + * parent as-is, not with the PCI translate + * method which chops off the top address cell. + */ + if (!of_find_property(np, "ranges", NULL)) + return 0; + + return 1; + } + + return 0; +} + +static void of_bus_pci_count_cells(struct device_node *np, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 3; + if (sizec) + *sizec = 2; +} + +static int of_bus_pci_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + /* Check address type match */ + if ((addr[0] ^ range[0]) & 0x03000000) + return -EINVAL; + + if (of_out_of_range(addr + 1, range + 1, range + na + pna, + na - 1, ns)) + return -EINVAL; + + /* Start with the parent range base. */ + memcpy(result, range + na, pna * 4); + + /* Add in the child address offset, skipping high cell. */ + for (i = 0; i < na - 1; i++) + result[pna - 1 - i] += + (addr[na - 1 - i] - + range[na - 1 - i]); + + memcpy(addr, result, pna * 4); + + return 0; +} + +static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) +{ + u32 w = addr[0]; + + /* For PCI, we override whatever child busses may have used. 
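+	 *
+	 * (addr[0] is the OF PCI "phys.hi" cell: bits 24-25 select the
+	 * address space, so 0x01 means I/O space and 0x02/0x03 mean
+	 * 32-bit/64-bit memory, while bit 30 marks the region
+	 * prefetchable. That is exactly what the switch below decodes.)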
*/ + flags = 0; + switch((w >> 24) & 0x03) { + case 0x01: + flags |= IORESOURCE_IO; + break; + + case 0x02: /* 32 bits */ + case 0x03: /* 64 bits */ + flags |= IORESOURCE_MEM; + break; + } + if (w & 0x40000000) + flags |= IORESOURCE_PREFETCH; + return flags; +} + +static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) +{ + return IORESOURCE_MEM; +} + + /* + * AMBAPP bus specific translator + */ + +static int of_bus_ambapp_match(struct device_node *np) +{ + return of_node_is_type(np, "ambapp"); +} + +static void of_bus_ambapp_count_cells(struct device_node *child, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 1; + if (sizec) + *sizec = 1; +} + +static int of_bus_ambapp_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + return of_bus_default_map(addr, range, na, ns, pna); +} + +static unsigned long of_bus_ambapp_get_flags(const u32 *addr, + unsigned long flags) +{ + return IORESOURCE_MEM; +} + +/* + * Array of bus specific translators + */ + +static struct of_bus of_busses[] = { + /* PCI */ + { + .name = "pci", + .addr_prop_name = "assigned-addresses", + .match = of_bus_pci_match, + .count_cells = of_bus_pci_count_cells, + .map = of_bus_pci_map, + .get_flags = of_bus_pci_get_flags, + }, + /* SBUS */ + { + .name = "sbus", + .addr_prop_name = "reg", + .match = of_bus_sbus_match, + .count_cells = of_bus_sbus_count_cells, + .map = of_bus_default_map, + .get_flags = of_bus_sbus_get_flags, + }, + /* AMBA */ + { + .name = "ambapp", + .addr_prop_name = "reg", + .match = of_bus_ambapp_match, + .count_cells = of_bus_ambapp_count_cells, + .map = of_bus_ambapp_map, + .get_flags = of_bus_ambapp_get_flags, + }, + /* Default */ + { + .name = "default", + .addr_prop_name = "reg", + .match = NULL, + .count_cells = of_bus_default_count_cells, + .map = of_bus_default_map, + .get_flags = of_bus_default_get_flags, + }, +}; + +static struct of_bus *of_match_bus(struct device_node *np) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(of_busses); i ++) + if (!of_busses[i].match || of_busses[i].match(np)) + return &of_busses[i]; + BUG(); + return NULL; +} + +static int __init build_one_resource(struct device_node *parent, + struct of_bus *bus, + struct of_bus *pbus, + u32 *addr, + int na, int ns, int pna) +{ + const u32 *ranges; + unsigned int rlen; + int rone; + + ranges = of_get_property(parent, "ranges", &rlen); + if (ranges == NULL || rlen == 0) { + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + memset(result, 0, pna * 4); + for (i = 0; i < na; i++) + result[pna - 1 - i] = + addr[na - 1 - i]; + + memcpy(addr, result, pna * 4); + return 0; + } + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + if (!bus->map(addr, ranges, na, ns, pna)) + return 0; + } + + return 1; +} + +static int __init use_1to1_mapping(struct device_node *pp) +{ + /* If we have a ranges property in the parent, use it. */ + if (of_find_property(pp, "ranges", NULL) != NULL) + return 0; + + /* Some SBUS devices use intermediate nodes to express + * hierarchy within the device itself. These aren't + * real bus nodes, and don't have a 'ranges' property. + * But, we should still pass the translation work up + * to the SBUS itself. 
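+	 *
+	 * A typical example (an illustrative PROM path): in
+	 * /sbus/espdma/esp the "espdma" node carries no "ranges", so
+	 * the esp registers get translated against the SBUS ranges
+	 * rather than against espdma itself.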
+	 */
+	if (of_node_name_eq(pp, "dma") ||
+	    of_node_name_eq(pp, "espdma") ||
+	    of_node_name_eq(pp, "ledma") ||
+	    of_node_name_eq(pp, "lebuffer"))
+		return 0;
+
+	return 1;
+}
+
+static int of_resource_verbose;
+
+static void __init build_device_resources(struct platform_device *op,
+					  struct device *parent)
+{
+	struct platform_device *p_op;
+	struct of_bus *bus;
+	int na, ns;
+	int index, num_reg;
+	const void *preg;
+
+	if (!parent)
+		return;
+
+	p_op = to_platform_device(parent);
+	bus = of_match_bus(p_op->dev.of_node);
+	bus->count_cells(op->dev.of_node, &na, &ns);
+
+	preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg);
+	if (!preg || num_reg == 0)
+		return;
+
+	/* Convert to num-cells. */
+	num_reg /= 4;
+
+	/* Convert to num-entries. */
+	num_reg /= na + ns;
+
+	op->resource = op->archdata.resource;
+	op->num_resources = num_reg;
+	for (index = 0; index < num_reg; index++) {
+		struct resource *r = &op->resource[index];
+		u32 addr[OF_MAX_ADDR_CELLS];
+		const u32 *reg = (preg + (index * ((na + ns) * 4)));
+		struct device_node *dp = op->dev.of_node;
+		struct device_node *pp = p_op->dev.of_node;
+		struct of_bus *pbus, *dbus;
+		u64 size, result = OF_BAD_ADDR;
+		unsigned long flags;
+		int dna, dns;
+		int pna, pns;
+
+		size = of_read_addr(reg + na, ns);
+
+		memcpy(addr, reg, na * 4);
+
+		flags = bus->get_flags(reg, 0);
+
+		if (use_1to1_mapping(pp)) {
+			result = of_read_addr(addr, na);
+			goto build_res;
+		}
+
+		dna = na;
+		dns = ns;
+		dbus = bus;
+
+		while (1) {
+			dp = pp;
+			pp = dp->parent;
+			if (!pp) {
+				result = of_read_addr(addr, dna);
+				break;
+			}
+
+			pbus = of_match_bus(pp);
+			pbus->count_cells(dp, &pna, &pns);
+
+			if (build_one_resource(dp, dbus, pbus, addr,
+					       dna, dns, pna))
+				break;
+
+			flags = pbus->get_flags(addr, flags);
+
+			dna = pna;
+			dns = pns;
+			dbus = pbus;
+		}
+
+	build_res:
+		memset(r, 0, sizeof(*r));
+
+		if (of_resource_verbose)
+			printk("%pOF reg[%d] -> %llx\n",
+			       op->dev.of_node, index,
+			       result);
+
+		if (result != OF_BAD_ADDR) {
+			r->start = result & 0xffffffff;
+			r->end = result + size - 1;
+			r->flags = flags | ((result >> 32ULL) & 0xffUL);
+		}
+		r->name = op->dev.of_node->full_name;
+	}
+}
+
+static struct platform_device * __init scan_one_device(struct device_node *dp,
+						       struct device *parent)
+{
+	struct platform_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
+	const struct linux_prom_irqs *intr;
+	struct dev_archdata *sd;
+	int len, i;
+
+	if (!op)
+		return NULL;
+
+	sd = &op->dev.archdata;
+	sd->op = op;
+
+	op->dev.of_node = dp;
+
+	intr = of_get_property(dp, "intr", &len);
+	if (intr) {
+		op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
+		for (i = 0; i < op->archdata.num_irqs; i++)
+			op->archdata.irqs[i] =
+			    sparc_config.build_device_irq(op, intr[i].pri);
+	} else {
+		const unsigned int *irq =
+			of_get_property(dp, "interrupts", &len);
+
+		if (irq) {
+			op->archdata.num_irqs = len / sizeof(unsigned int);
+			for (i = 0; i < op->archdata.num_irqs; i++)
+				op->archdata.irqs[i] =
+				    sparc_config.build_device_irq(op, irq[i]);
+		} else {
+			op->archdata.num_irqs = 0;
+		}
+	}
+
+	build_device_resources(op, parent);
+
+	op->dev.parent = parent;
+	op->dev.bus = &platform_bus_type;
+	if (!parent)
+		dev_set_name(&op->dev, "root");
+	else
+		dev_set_name(&op->dev, "%08x", dp->phandle);
+
+	op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
+	if (of_device_register(op)) {
+		printk("%pOF: Could not register of device.\n", dp);
+		kfree(op);
+		op = NULL;
+	}
+
+	return op;
+}
+
+static void __init
scan_tree(struct device_node *dp, struct device *parent) +{ + while (dp) { + struct platform_device *op = scan_one_device(dp, parent); + + if (op) + scan_tree(dp->child, &op->dev); + + dp = dp->sibling; + } +} + +static int __init scan_of_devices(void) +{ + struct device_node *root = of_find_node_by_path("/"); + struct platform_device *parent; + + parent = scan_one_device(root, NULL); + if (!parent) + return 0; + + scan_tree(root->child, &parent->dev); + return 0; +} +postcore_initcall(scan_of_devices); + +static int __init of_debug(char *str) +{ + int val = 0; + + get_option(&str, &val); + if (val & 1) + of_resource_verbose = 1; + return 1; +} + +__setup("of_debug=", of_debug); diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c new file mode 100644 index 000000000..5a9f86b1d --- /dev/null +++ b/arch/sparc/kernel/of_device_64.c @@ -0,0 +1,728 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "of_device_common.h" + +void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) +{ + unsigned long ret = res->start + offset; + struct resource *r; + + if (res->flags & IORESOURCE_MEM) + r = request_mem_region(ret, size, name); + else + r = request_region(ret, size, name); + if (!r) + ret = 0; + + return (void __iomem *) ret; +} +EXPORT_SYMBOL(of_ioremap); + +void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) +{ + if (res->flags & IORESOURCE_MEM) + release_mem_region((unsigned long) base, size); + else + release_region((unsigned long) base, size); +} +EXPORT_SYMBOL(of_iounmap); + +/* + * PCI bus specific translator + */ + +static int of_bus_pci_match(struct device_node *np) +{ + if (of_node_name_eq(np, "pci")) { + const char *model = of_get_property(np, "model", NULL); + + if (model && !strcmp(model, "SUNW,simba")) + return 0; + + /* Do not do PCI specific frobbing if the + * PCI bridge lacks a ranges property. We + * want to pass it through up to the next + * parent as-is, not with the PCI translate + * method which chops off the top address cell. + */ + if (!of_find_property(np, "ranges", NULL)) + return 0; + + return 1; + } + + return 0; +} + +static int of_bus_simba_match(struct device_node *np) +{ + const char *model = of_get_property(np, "model", NULL); + + if (model && !strcmp(model, "SUNW,simba")) + return 1; + + /* Treat PCI busses lacking ranges property just like + * simba. + */ + if (of_node_name_eq(np, "pci")) { + if (!of_find_property(np, "ranges", NULL)) + return 1; + } + + return 0; +} + +static int of_bus_simba_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + return 0; +} + +static void of_bus_pci_count_cells(struct device_node *np, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 3; + if (sizec) + *sizec = 2; +} + +static int of_bus_pci_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + /* Check address type match */ + if (!((addr[0] ^ range[0]) & 0x03000000)) + goto type_match; + + /* Special exception, we can map a 64-bit address into + * a 32-bit range. + */ + if ((addr[0] & 0x03000000) == 0x03000000 && + (range[0] & 0x03000000) == 0x02000000) + goto type_match; + + return -EINVAL; + +type_match: + if (of_out_of_range(addr + 1, range + 1, range + na + pna, + na - 1, ns)) + return -EINVAL; + + /* Start with the parent range base. 
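+	 * As a sketch of the arithmetic (assuming the usual na=3/ns=2
+	 * PCI cells and pna=2 parent cells): each low child cell minus
+	 * the matching range cell is added onto the parent base, and
+	 * the phys.hi cell is deliberately skipped.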
*/ + memcpy(result, range + na, pna * 4); + + /* Add in the child address offset, skipping high cell. */ + for (i = 0; i < na - 1; i++) + result[pna - 1 - i] += + (addr[na - 1 - i] - + range[na - 1 - i]); + + memcpy(addr, result, pna * 4); + + return 0; +} + +static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) +{ + u32 w = addr[0]; + + /* For PCI, we override whatever child busses may have used. */ + flags = 0; + switch((w >> 24) & 0x03) { + case 0x01: + flags |= IORESOURCE_IO; + break; + + case 0x02: /* 32 bits */ + case 0x03: /* 64 bits */ + flags |= IORESOURCE_MEM; + break; + } + if (w & 0x40000000) + flags |= IORESOURCE_PREFETCH; + return flags; +} + +/* + * FHC/Central bus specific translator. + * + * This is just needed to hard-code the address and size cell + * counts. 'fhc' and 'central' nodes lack the #address-cells and + * #size-cells properties, and if you walk to the root on such + * Enterprise boxes all you'll get is a #size-cells of 2 which is + * not what we want to use. + */ +static int of_bus_fhc_match(struct device_node *np) +{ + return of_node_name_eq(np, "fhc") || + of_node_name_eq(np, "central"); +} + +#define of_bus_fhc_count_cells of_bus_sbus_count_cells + +/* + * Array of bus specific translators + */ + +static struct of_bus of_busses[] = { + /* PCI */ + { + .name = "pci", + .addr_prop_name = "assigned-addresses", + .match = of_bus_pci_match, + .count_cells = of_bus_pci_count_cells, + .map = of_bus_pci_map, + .get_flags = of_bus_pci_get_flags, + }, + /* SIMBA */ + { + .name = "simba", + .addr_prop_name = "assigned-addresses", + .match = of_bus_simba_match, + .count_cells = of_bus_pci_count_cells, + .map = of_bus_simba_map, + .get_flags = of_bus_pci_get_flags, + }, + /* SBUS */ + { + .name = "sbus", + .addr_prop_name = "reg", + .match = of_bus_sbus_match, + .count_cells = of_bus_sbus_count_cells, + .map = of_bus_default_map, + .get_flags = of_bus_default_get_flags, + }, + /* FHC */ + { + .name = "fhc", + .addr_prop_name = "reg", + .match = of_bus_fhc_match, + .count_cells = of_bus_fhc_count_cells, + .map = of_bus_default_map, + .get_flags = of_bus_default_get_flags, + }, + /* Default */ + { + .name = "default", + .addr_prop_name = "reg", + .match = NULL, + .count_cells = of_bus_default_count_cells, + .map = of_bus_default_map, + .get_flags = of_bus_default_get_flags, + }, +}; + +static struct of_bus *of_match_bus(struct device_node *np) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(of_busses); i ++) + if (!of_busses[i].match || of_busses[i].match(np)) + return &of_busses[i]; + BUG(); + return NULL; +} + +static int __init build_one_resource(struct device_node *parent, + struct of_bus *bus, + struct of_bus *pbus, + u32 *addr, + int na, int ns, int pna) +{ + const u32 *ranges; + int rone, rlen; + + ranges = of_get_property(parent, "ranges", &rlen); + if (ranges == NULL || rlen == 0) { + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + memset(result, 0, pna * 4); + for (i = 0; i < na; i++) + result[pna - 1 - i] = + addr[na - 1 - i]; + + memcpy(addr, result, pna * 4); + return 0; + } + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + if (!bus->map(addr, ranges, na, ns, pna)) + return 0; + } + + /* When we miss an I/O space match on PCI, just pass it up + * to the next PCI bridge and/or controller. 
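+	 *
+	 * (The test below relies on the phys.hi encoding again:
+	 * (addr[0] & 0x03000000) == 0x01000000 identifies an I/O space
+	 * address, the same code that of_bus_pci_get_flags() maps to
+	 * IORESOURCE_IO.)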
+ */ + if (!strcmp(bus->name, "pci") && + (addr[0] & 0x03000000) == 0x01000000) + return 0; + + return 1; +} + +static int __init use_1to1_mapping(struct device_node *pp) +{ + /* If we have a ranges property in the parent, use it. */ + if (of_find_property(pp, "ranges", NULL) != NULL) + return 0; + + /* If the parent is the dma node of an ISA bus, pass + * the translation up to the root. + * + * Some SBUS devices use intermediate nodes to express + * hierarchy within the device itself. These aren't + * real bus nodes, and don't have a 'ranges' property. + * But, we should still pass the translation work up + * to the SBUS itself. + */ + if (of_node_name_eq(pp, "dma") || + of_node_name_eq(pp, "espdma") || + of_node_name_eq(pp, "ledma") || + of_node_name_eq(pp, "lebuffer")) + return 0; + + /* Similarly for all PCI bridges, if we get this far + * it lacks a ranges property, and this will include + * cases like Simba. + */ + if (of_node_name_eq(pp, "pci")) + return 0; + + return 1; +} + +static int of_resource_verbose; + +static void __init build_device_resources(struct platform_device *op, + struct device *parent) +{ + struct platform_device *p_op; + struct of_bus *bus; + int na, ns; + int index, num_reg; + const void *preg; + + if (!parent) + return; + + p_op = to_platform_device(parent); + bus = of_match_bus(p_op->dev.of_node); + bus->count_cells(op->dev.of_node, &na, &ns); + + preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg); + if (!preg || num_reg == 0) + return; + + /* Convert to num-cells. */ + num_reg /= 4; + + /* Convert to num-entries. */ + num_reg /= na + ns; + + /* Prevent overrunning the op->resources[] array. */ + if (num_reg > PROMREG_MAX) { + printk(KERN_WARNING "%pOF: Too many regs (%d), " + "limiting to %d.\n", + op->dev.of_node, num_reg, PROMREG_MAX); + num_reg = PROMREG_MAX; + } + + op->resource = op->archdata.resource; + op->num_resources = num_reg; + for (index = 0; index < num_reg; index++) { + struct resource *r = &op->resource[index]; + u32 addr[OF_MAX_ADDR_CELLS]; + const u32 *reg = (preg + (index * ((na + ns) * 4))); + struct device_node *dp = op->dev.of_node; + struct device_node *pp = p_op->dev.of_node; + struct of_bus *pbus, *dbus; + u64 size, result = OF_BAD_ADDR; + unsigned long flags; + int dna, dns; + int pna, pns; + + size = of_read_addr(reg + na, ns); + memcpy(addr, reg, na * 4); + + flags = bus->get_flags(addr, 0); + + if (use_1to1_mapping(pp)) { + result = of_read_addr(addr, na); + goto build_res; + } + + dna = na; + dns = ns; + dbus = bus; + + while (1) { + dp = pp; + pp = dp->parent; + if (!pp) { + result = of_read_addr(addr, dna); + break; + } + + pbus = of_match_bus(pp); + pbus->count_cells(dp, &pna, &pns); + + if (build_one_resource(dp, dbus, pbus, addr, + dna, dns, pna)) + break; + + flags = pbus->get_flags(addr, flags); + + dna = pna; + dns = pns; + dbus = pbus; + } + + build_res: + memset(r, 0, sizeof(*r)); + + if (of_resource_verbose) + printk("%pOF reg[%d] -> %llx\n", + op->dev.of_node, index, + result); + + if (result != OF_BAD_ADDR) { + if (tlb_type == hypervisor) + result &= 0x0fffffffffffffffUL; + + r->start = result; + r->end = result + size - 1; + r->flags = flags; + } + r->name = op->dev.of_node->full_name; + } +} + +static struct device_node * __init +apply_interrupt_map(struct device_node *dp, struct device_node *pp, + const u32 *imap, int imlen, const u32 *imask, + unsigned int *irq_p) +{ + struct device_node *cp; + unsigned int irq = *irq_p; + struct of_bus *bus; + phandle handle; + const u32 *reg; + int na, 
num_reg, i;
+
+	bus = of_match_bus(pp);
+	bus->count_cells(dp, &na, NULL);
+
+	reg = of_get_property(dp, "reg", &num_reg);
+	if (!reg || !num_reg)
+		return NULL;
+
+	imlen /= ((na + 3) * 4);
+	handle = 0;
+	for (i = 0; i < imlen; i++) {
+		int j;
+
+		for (j = 0; j < na; j++) {
+			if ((reg[j] & imask[j]) != imap[j])
+				goto next;
+		}
+		if (imap[na] == irq) {
+			handle = imap[na + 1];
+			irq = imap[na + 2];
+			break;
+		}
+
+	next:
+		imap += (na + 3);
+	}
+	if (i == imlen) {
+		/* Psycho and Sabre PCI controllers can have 'interrupt-map'
+		 * properties that do not include the on-board device
+		 * interrupts. Instead, the device's 'interrupts' property
+		 * is already a fully specified INO value.
+		 *
+		 * Handle this by deciding that, if we didn't get a
+		 * match in the parent's 'interrupt-map', and the
+		 * parent is an IRQ translator, then use the parent as
+		 * our IRQ controller.
+		 */
+		if (pp->irq_trans)
+			return pp;
+
+		return NULL;
+	}
+
+	*irq_p = irq;
+	cp = of_find_node_by_phandle(handle);
+
+	return cp;
+}
+
+static unsigned int __init pci_irq_swizzle(struct device_node *dp,
+					   struct device_node *pp,
+					   unsigned int irq)
+{
+	const struct linux_prom_pci_registers *regs;
+	unsigned int bus, devfn, slot, ret;
+
+	if (irq < 1 || irq > 4)
+		return irq;
+
+	regs = of_get_property(dp, "reg", NULL);
+	if (!regs)
+		return irq;
+
+	bus = (regs->phys_hi >> 16) & 0xff;
+	devfn = (regs->phys_hi >> 8) & 0xff;
+	slot = (devfn >> 3) & 0x1f;
+
+	if (pp->irq_trans) {
+		/* Derived from Table 8-3, U2P User's Manual. This branch
+		 * is handling a PCI controller that lacks a proper set of
+		 * interrupt-map and interrupt-map-mask properties. The
+		 * Ultra-E450 is one example.
+		 *
+		 * The bit layout is BSSLL, where:
+		 *	B: 0 on bus A, 1 on bus B
+		 *	SS: 2-bit slot number, derived from PCI device number as
+		 *	    (dev - 1) for bus A, or (dev - 2) for bus B
+		 *	LL: 2-bit line number
+		 */
+		if (bus & 0x80) {
+			/* PBM-A */
+			bus = 0x00;
+			slot = (slot - 1) << 2;
+		} else {
+			/* PBM-B */
+			bus = 0x10;
+			slot = (slot - 2) << 2;
+		}
+		irq -= 1;
+
+		ret = (bus | slot | irq);
+	} else {
+		/* Going through a PCI-PCI bridge that lacks a set of
+		 * interrupt-map and interrupt-map-mask properties.
+		 */
+		ret = ((irq - 1 + (slot & 3)) & 3) + 1;
+	}
+
+	return ret;
+}
+
+static int of_irq_verbose;
+
+static unsigned int __init build_one_device_irq(struct platform_device *op,
+						struct device *parent,
+						unsigned int irq)
+{
+	struct device_node *dp = op->dev.of_node;
+	struct device_node *pp, *ip;
+	unsigned int orig_irq = irq;
+	int nid;
+
+	if (irq == 0xffffffff)
+		return irq;
+
+	if (dp->irq_trans) {
+		irq = dp->irq_trans->irq_build(dp, irq,
+					       dp->irq_trans->data);
+
+		if (of_irq_verbose)
+			printk("%pOF: direct translate %x --> %x\n",
+			       dp, orig_irq, irq);
+
+		goto out;
+	}
+
+	/* Something more complicated. Walk up to the root, applying
+	 * interrupt-map or bus specific translations, until we hit
+	 * an IRQ translator.
+	 *
+	 * If we hit a bus type or situation we cannot handle, we
+	 * stop and assume that the original IRQ number was in a
+	 * format which has special meaning to its immediate parent.
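+	 *
+	 * Sketch of the walk on a hypothetical tree: for a device
+	 * under ebus -> pci -> pbm, an "interrupt-map" in the ebus
+	 * node may rewrite the IRQ, the pci node may swizzle it, and
+	 * the pbm node (which has irq_trans) finally builds the Linux
+	 * IRQ via irq_build().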
+ */ + pp = dp->parent; + ip = NULL; + while (pp) { + const void *imap, *imsk; + int imlen; + + imap = of_get_property(pp, "interrupt-map", &imlen); + imsk = of_get_property(pp, "interrupt-map-mask", NULL); + if (imap && imsk) { + struct device_node *iret; + int this_orig_irq = irq; + + iret = apply_interrupt_map(dp, pp, + imap, imlen, imsk, + &irq); + + if (of_irq_verbose) + printk("%pOF: Apply [%pOF:%x] imap --> [%pOF:%x]\n", + op->dev.of_node, + pp, this_orig_irq, iret, irq); + + if (!iret) + break; + + if (iret->irq_trans) { + ip = iret; + break; + } + } else { + if (of_node_name_eq(pp, "pci")) { + unsigned int this_orig_irq = irq; + + irq = pci_irq_swizzle(dp, pp, irq); + if (of_irq_verbose) + printk("%pOF: PCI swizzle [%pOF] " + "%x --> %x\n", + op->dev.of_node, + pp, this_orig_irq, + irq); + + } + + if (pp->irq_trans) { + ip = pp; + break; + } + } + dp = pp; + pp = pp->parent; + } + if (!ip) + return orig_irq; + + irq = ip->irq_trans->irq_build(op->dev.of_node, irq, + ip->irq_trans->data); + if (of_irq_verbose) + printk("%pOF: Apply IRQ trans [%pOF] %x --> %x\n", + op->dev.of_node, ip, orig_irq, irq); + +out: + nid = of_node_to_nid(dp); + if (nid != -1) { + cpumask_t numa_mask; + + cpumask_copy(&numa_mask, cpumask_of_node(nid)); + irq_set_affinity(irq, &numa_mask); + } + + return irq; +} + +static struct platform_device * __init scan_one_device(struct device_node *dp, + struct device *parent) +{ + struct platform_device *op = kzalloc(sizeof(*op), GFP_KERNEL); + const unsigned int *irq; + struct dev_archdata *sd; + int len, i; + + if (!op) + return NULL; + + sd = &op->dev.archdata; + sd->op = op; + + op->dev.of_node = dp; + + irq = of_get_property(dp, "interrupts", &len); + if (irq) { + op->archdata.num_irqs = len / 4; + + /* Prevent overrunning the op->irqs[] array. 
*/ + if (op->archdata.num_irqs > PROMINTR_MAX) { + printk(KERN_WARNING "%pOF: Too many irqs (%d), " + "limiting to %d.\n", + dp, op->archdata.num_irqs, PROMINTR_MAX); + op->archdata.num_irqs = PROMINTR_MAX; + } + memcpy(op->archdata.irqs, irq, op->archdata.num_irqs * 4); + } else { + op->archdata.num_irqs = 0; + } + + build_device_resources(op, parent); + for (i = 0; i < op->archdata.num_irqs; i++) + op->archdata.irqs[i] = build_one_device_irq(op, parent, op->archdata.irqs[i]); + + op->dev.parent = parent; + op->dev.bus = &platform_bus_type; + if (!parent) + dev_set_name(&op->dev, "root"); + else + dev_set_name(&op->dev, "%08x", dp->phandle); + op->dev.coherent_dma_mask = DMA_BIT_MASK(32); + op->dev.dma_mask = &op->dev.coherent_dma_mask; + + if (of_device_register(op)) { + printk("%pOF: Could not register of device.\n", dp); + kfree(op); + op = NULL; + } + + return op; +} + +static void __init scan_tree(struct device_node *dp, struct device *parent) +{ + while (dp) { + struct platform_device *op = scan_one_device(dp, parent); + + if (op) + scan_tree(dp->child, &op->dev); + + dp = dp->sibling; + } +} + +static int __init scan_of_devices(void) +{ + struct device_node *root = of_find_node_by_path("/"); + struct platform_device *parent; + + parent = scan_one_device(root, NULL); + if (!parent) + return 0; + + scan_tree(root->child, &parent->dev); + return 0; +} +postcore_initcall(scan_of_devices); + +static int __init of_debug(char *str) +{ + int val = 0; + + get_option(&str, &val); + if (val & 1) + of_resource_verbose = 1; + if (val & 2) + of_irq_verbose = 1; + return 1; +} + +__setup("of_debug=", of_debug); diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c new file mode 100644 index 000000000..e717a56ef --- /dev/null +++ b/arch/sparc/kernel/of_device_common.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "of_device_common.h" + +unsigned int irq_of_parse_and_map(struct device_node *node, int index) +{ + struct platform_device *op = of_find_device_by_node(node); + + if (!op || index >= op->archdata.num_irqs) + return 0; + + return op->archdata.irqs[index]; +} +EXPORT_SYMBOL(irq_of_parse_and_map); + +int of_address_to_resource(struct device_node *node, int index, + struct resource *r) +{ + struct platform_device *op = of_find_device_by_node(node); + + if (!op || index >= op->num_resources) + return -EINVAL; + + memcpy(r, &op->archdata.resource[index], sizeof(*r)); + return 0; +} +EXPORT_SYMBOL_GPL(of_address_to_resource); + +void __iomem *of_iomap(struct device_node *node, int index) +{ + struct platform_device *op = of_find_device_by_node(node); + struct resource *r; + + if (!op || index >= op->num_resources) + return NULL; + + r = &op->archdata.resource[index]; + + return of_ioremap(r, 0, resource_size(r), (char *) r->name); +} +EXPORT_SYMBOL(of_iomap); + +/* Take the archdata values for IOMMU, STC, and HOSTDATA found in + * BUS and propagate to all child platform_device objects. 
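+ *
+ * For example, pci.c calls this for "ebus" bridge nodes so that the
+ * devices behind the bridge inherit the PBM's IOMMU and streaming
+ * cache settings.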
+ */ +void of_propagate_archdata(struct platform_device *bus) +{ + struct dev_archdata *bus_sd = &bus->dev.archdata; + struct device_node *bus_dp = bus->dev.of_node; + struct device_node *dp; + + for (dp = bus_dp->child; dp; dp = dp->sibling) { + struct platform_device *op = of_find_device_by_node(dp); + + op->dev.archdata.iommu = bus_sd->iommu; + op->dev.archdata.stc = bus_sd->stc; + op->dev.archdata.host_controller = bus_sd->host_controller; + op->dev.archdata.numa_node = bus_sd->numa_node; + op->dev.dma_ops = bus->dev.dma_ops; + + if (dp->child) + of_propagate_archdata(op); + } +} + +static void get_cells(struct device_node *dp, int *addrc, int *sizec) +{ + if (addrc) + *addrc = of_n_addr_cells(dp); + if (sizec) + *sizec = of_n_size_cells(dp); +} + +/* + * Default translator (generic bus) + */ + +void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec) +{ + get_cells(dev, addrc, sizec); +} + +/* Make sure the least significant 64-bits are in-range. Even + * for 3 or 4 cell values it is a good enough approximation. + */ +int of_out_of_range(const u32 *addr, const u32 *base, + const u32 *size, int na, int ns) +{ + u64 a = of_read_addr(addr, na); + u64 b = of_read_addr(base, na); + + if (a < b) + return 1; + + b += of_read_addr(size, ns); + if (a >= b) + return 1; + + return 0; +} + +int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna) +{ + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + if (ns > 2) { + printk("of_device: Cannot handle size cells (%d) > 2.", ns); + return -EINVAL; + } + + if (of_out_of_range(addr, range, range + na + pna, na, ns)) + return -EINVAL; + + /* Start with the parent range base. */ + memcpy(result, range + na, pna * 4); + + /* Add in the child address offset. */ + for (i = 0; i < na; i++) + result[pna - 1 - i] += + (addr[na - 1 - i] - + range[na - 1 - i]); + + memcpy(addr, result, pna * 4); + + return 0; +} + +unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) +{ + if (flags) + return flags; + return IORESOURCE_MEM; +} + +/* + * SBUS bus specific translator + */ + +int of_bus_sbus_match(struct device_node *np) +{ + struct device_node *dp = np; + + while (dp) { + if (of_node_name_eq(dp, "sbus") || + of_node_name_eq(dp, "sbi")) + return 1; + + /* Have a look at use_1to1_mapping(). We're trying + * to match SBUS if that's the top-level bus and we + * don't have some intervening real bus that provides + * ranges based translations. 
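+		 *
+		 * So, as an illustration, an "esp" hanging off "espdma"
+		 * under "sbus" still matches here, while the same node
+		 * under a bridge that does carry "ranges" stops the
+		 * walk first.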
+ */ + if (of_find_property(dp, "ranges", NULL) != NULL) + break; + + dp = dp->parent; + } + + return 0; +} + +void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec) +{ + if (addrc) + *addrc = 2; + if (sizec) + *sizec = 1; +} diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h new file mode 100644 index 000000000..3d66230c6 --- /dev/null +++ b/arch/sparc/kernel/of_device_common.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _OF_DEVICE_COMMON_H +#define _OF_DEVICE_COMMON_H + +static inline u64 of_read_addr(const u32 *cell, int size) +{ + u64 r = 0; + while (size--) + r = (r << 32) | *(cell++); + return r; +} + +void of_bus_default_count_cells(struct device_node *dev, int *addrc, + int *sizec); +int of_out_of_range(const u32 *addr, const u32 *base, + const u32 *size, int na, int ns); +int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna); +unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags); + +int of_bus_sbus_match(struct device_node *np); +void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec); + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 + +struct of_bus { + const char *name; + const char *addr_prop_name; + int (*match)(struct device_node *parent); + void (*count_cells)(struct device_node *child, + int *addrc, int *sizec); + int (*map)(u32 *addr, const u32 *range, + int na, int ns, int pna); + unsigned long (*get_flags)(const u32 *addr, unsigned long); +}; + +#endif /* _OF_DEVICE_COMMON_H */ diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c new file mode 100644 index 000000000..cb1ef2511 --- /dev/null +++ b/arch/sparc/kernel/pci.c @@ -0,0 +1,1011 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci.c: UltraSparc PCI controller support. + * + * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) + * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) + * + * OF tree based PCI bus probing taken from the PowerPC port + * with minor modifications, see there for credits. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci_impl.h" +#include "kernel.h" + +/* List of all PCI controllers found in the system. */ +struct pci_pbm_info *pci_pbm_root = NULL; + +/* Each PBM found gets a unique index. 
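+ *
+ * (The pci_poke_* flags below are, as an assumption spelled out here,
+ * a handshake with the trap handlers: a fault taken while poking
+ * config space sets pci_poke_faulted instead of OOPSing, so the
+ * accessors can report a master abort as a failed read.)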
*/ +int pci_num_pbms = 0; + +volatile int pci_poke_in_progress; +volatile int pci_poke_cpu = -1; +volatile int pci_poke_faulted; + +static DEFINE_SPINLOCK(pci_poke_lock); + +void pci_config_read8(u8 *addr, u8 *ret) +{ + unsigned long flags; + u8 byte; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduba [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (byte) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = byte; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_read16(u16 *addr, u16 *ret) +{ + unsigned long flags; + u16 word; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduha [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (word) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = word; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_read32(u32 *addr, u32 *ret) +{ + unsigned long flags; + u32 dword; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduwa [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (dword) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = dword; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write8(u8 *addr, u8 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stba %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write16(u16 *addr, u16 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stha %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write32(u32 *addr, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stwa %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +static int ofpci_verbose; + +static int __init ofpci_debug(char *str) +{ + int val = 0; + + get_option(&str, &val); + if (val) + ofpci_verbose = 1; + return 1; +} + +__setup("ofpci_debug=", ofpci_debug); + +static unsigned long pci_parse_of_flags(u32 addr0) +{ + unsigned long flags = 0; + + if (addr0 & 0x02000000) { + flags = IORESOURCE_MEM | 
PCI_BASE_ADDRESS_SPACE_MEMORY; + flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; + if (addr0 & 0x01000000) + flags |= IORESOURCE_MEM_64 + | PCI_BASE_ADDRESS_MEM_TYPE_64; + if (addr0 & 0x40000000) + flags |= IORESOURCE_PREFETCH + | PCI_BASE_ADDRESS_MEM_PREFETCH; + } else if (addr0 & 0x01000000) + flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; + return flags; +} + +/* The of_device layer has translated all of the assigned-address properties + * into physical address resources, we only have to figure out the register + * mapping. + */ +static void pci_parse_of_addrs(struct platform_device *op, + struct device_node *node, + struct pci_dev *dev) +{ + struct resource *op_res; + const u32 *addrs; + int proplen; + + addrs = of_get_property(node, "assigned-addresses", &proplen); + if (!addrs) + return; + if (ofpci_verbose) + pci_info(dev, " parse addresses (%d bytes) @ %p\n", + proplen, addrs); + op_res = &op->resource[0]; + for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { + struct resource *res; + unsigned long flags; + int i; + + flags = pci_parse_of_flags(addrs[0]); + if (!flags) + continue; + i = addrs[0] & 0xff; + if (ofpci_verbose) + pci_info(dev, " start: %llx, end: %llx, i: %x\n", + op_res->start, op_res->end, i); + + if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { + res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; + } else if (i == dev->rom_base_reg) { + res = &dev->resource[PCI_ROM_RESOURCE]; + flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; + } else { + pci_err(dev, "bad cfg reg num 0x%x\n", i); + continue; + } + res->start = op_res->start; + res->end = op_res->end; + res->flags = flags; + res->name = pci_name(dev); + + pci_info(dev, "reg 0x%x: %pR\n", i, res); + } +} + +static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu, + void *stc, void *host_controller, + struct platform_device *op, + int numa_node) +{ + sd->iommu = iommu; + sd->stc = stc; + sd->host_controller = host_controller; + sd->op = op; + sd->numa_node = numa_node; +} + +static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus, int devfn) +{ + struct dev_archdata *sd; + struct platform_device *op; + struct pci_dev *dev; + u32 class; + + dev = pci_alloc_dev(bus); + if (!dev) + return NULL; + + op = of_find_device_by_node(node); + sd = &dev->dev.archdata; + pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op, + pbm->numa_node); + sd = &op->dev.archdata; + sd->iommu = pbm->iommu; + sd->stc = &pbm->stc; + sd->numa_node = pbm->numa_node; + + if (of_node_name_eq(node, "ebus")) + of_propagate_archdata(op); + + if (ofpci_verbose) + pci_info(bus," create device, devfn: %x, type: %s\n", + devfn, of_node_get_device_type(node)); + + dev->sysdata = node; + dev->dev.parent = bus->bridge; + dev->dev.bus = &pci_bus_type; + dev->dev.of_node = of_node_get(node); + dev->devfn = devfn; + dev->multifunction = 0; /* maybe a lie? */ + set_pcie_port_type(dev); + + pci_dev_assign_slot(dev); + dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); + dev->device = of_getintprop_default(node, "device-id", 0xffff); + dev->subsystem_vendor = + of_getintprop_default(node, "subsystem-vendor-id", 0); + dev->subsystem_device = + of_getintprop_default(node, "subsystem-id", 0); + + dev->cfg_size = pci_cfg_space_size(dev); + + /* We can't actually use the firmware value, we have + * to read what is in the register right now. 
One + * reason is that in the case of IDE interfaces the + * firmware can sample the value before the IDE + * interface is programmed into native mode. + */ + pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); + dev->class = class >> 8; + dev->revision = class & 0xff; + + dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus), + dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + /* I have seen IDE devices which will not respond to + * the bmdma simplex check reads if bus mastering is + * disabled. + */ + if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) + pci_set_master(dev); + + dev->current_state = PCI_UNKNOWN; /* unknown power state */ + dev->error_state = pci_channel_io_normal; + dev->dma_mask = 0xffffffff; + + if (of_node_name_eq(node, "pci")) { + /* a PCI-PCI bridge */ + dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; + dev->rom_base_reg = PCI_ROM_ADDRESS1; + } else if (of_node_is_type(node, "cardbus")) { + dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; + } else { + dev->hdr_type = PCI_HEADER_TYPE_NORMAL; + dev->rom_base_reg = PCI_ROM_ADDRESS; + + dev->irq = sd->op->archdata.irqs[0]; + if (dev->irq == 0xffffffff) + dev->irq = PCI_IRQ_NONE; + } + + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", + dev->vendor, dev->device, dev->hdr_type, dev->class); + + pci_parse_of_addrs(sd->op, node, dev); + + if (ofpci_verbose) + pci_info(dev, " adding to system ...\n"); + + pci_device_add(dev, bus); + + return dev; +} + +static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) +{ + u32 idx, first, last; + + first = 8; + last = 0; + for (idx = 0; idx < 8; idx++) { + if ((map & (1 << idx)) != 0) { + if (first > idx) + first = idx; + if (last < idx) + last = idx; + } + } + + *first_p = first; + *last_p = last; +} + +/* Cook up fake bus resources for SUNW,simba PCI bridges which lack + * a proper 'ranges' property.
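+ * + * Worked example (values made up): with APB_IO_ADDRESS_MAP reading 0x34 (bits 2, 4 and 5 set), apb_calc_first_last() above yields first = 2 and last = 5, so the fabricated I/O window becomes [2 << 21, (5 << 21) + (1 << 21) - 1] = [0x400000, 0xbfffff].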
+ */ +static void apb_fake_ranges(struct pci_dev *dev, + struct pci_bus *bus, + struct pci_pbm_info *pbm) +{ + struct pci_bus_region region; + struct resource *res; + u32 first, last; + u8 map; + + pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); + apb_calc_first_last(map, &first, &last); + res = bus->resource[0]; + res->flags = IORESOURCE_IO; + region.start = (first << 21); + region.end = (last << 21) + ((1 << 21) - 1); + pcibios_bus_to_resource(dev->bus, res, &region); + + pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); + apb_calc_first_last(map, &first, &last); + res = bus->resource[1]; + res->flags = IORESOURCE_MEM; + region.start = (first << 29); + region.end = (last << 29) + ((1 << 29) - 1); + pcibios_bus_to_resource(dev->bus, res, &region); +} + +static void pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus); + +#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) + +static void of_scan_pci_bridge(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_dev *dev) +{ + struct pci_bus *bus; + const u32 *busrange, *ranges; + int len, i, simba; + struct pci_bus_region region; + struct resource *res; + unsigned int flags; + u64 size; + + if (ofpci_verbose) + pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node); + + /* parse bus-range property */ + busrange = of_get_property(node, "bus-range", &len); + if (busrange == NULL || len != 8) { + pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n", + node); + return; + } + + if (ofpci_verbose) + pci_info(dev, " Bridge bus range [%u --> %u]\n", + busrange[0], busrange[1]); + + ranges = of_get_property(node, "ranges", &len); + simba = 0; + if (ranges == NULL) { + const char *model = of_get_property(node, "model", NULL); + if (model && !strcmp(model, "SUNW,simba")) + simba = 1; + } + + bus = pci_add_new_bus(dev->bus, dev, busrange[0]); + if (!bus) { + pci_err(dev, "Failed to create pci bus for %pOF\n", + node); + return; + } + + bus->primary = dev->bus->number; + pci_bus_insert_busn_res(bus, busrange[0], busrange[1]); + bus->bridge_ctl = 0; + + if (ofpci_verbose) + pci_info(dev, " Bridge ranges[%p] simba[%d]\n", + ranges, simba); + + /* parse ranges property, or cook one up by hand for Simba */ + /* PCI #address-cells == 3 and #size-cells == 2 always */ + res = &dev->resource[PCI_BRIDGE_RESOURCES]; + for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { + res->flags = 0; + bus->resource[i] = res; + ++res; + } + if (simba) { + apb_fake_ranges(dev, bus, pbm); + goto after_ranges; + } else if (ranges == NULL) { + pci_read_bridge_bases(bus); + goto after_ranges; + } + i = 1; + for (; len >= 32; len -= 32, ranges += 8) { + u64 start; + + if (ofpci_verbose) + pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:" + "%08x:%08x]\n", + ranges[0], ranges[1], ranges[2], ranges[3], + ranges[4], ranges[5], ranges[6], ranges[7]); + + flags = pci_parse_of_flags(ranges[0]); + size = GET_64BIT(ranges, 6); + if (flags == 0 || size == 0) + continue; + + /* On PCI-Express systems, PCI bridges that have no devices downstream + * have a bogus size value where the first 32-bit cell is 0xffffffff. + * This results in a bogus range where start + size overflows. + * + * Just skip these otherwise the kernel will complain when the resource + * tries to be claimed.
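+ * + * For example: size cells of {0xffffffff, 0x00000000} make GET_64BIT(ranges, 6) return 0xffffffff00000000, so the (size >> 32 == 0xffffffff) test below drops the range before region.start + size - 1 can wrap.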
+ */ + if (size >> 32 == 0xffffffff) + continue; + + if (flags & IORESOURCE_IO) { + res = bus->resource[0]; + if (res->flags) { + pci_err(dev, "ignoring extra I/O range" + " for bridge %pOF\n", node); + continue; + } + } else { + if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { + pci_err(dev, "too many memory ranges" + " for bridge %pOF\n", node); + continue; + } + res = bus->resource[i]; + ++i; + } + + res->flags = flags; + region.start = start = GET_64BIT(ranges, 1); + region.end = region.start + size - 1; + + if (ofpci_verbose) + pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n", + flags, start, size); + + pcibios_bus_to_resource(dev->bus, res, &region); + } +after_ranges: + sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), + bus->number); + if (ofpci_verbose) + pci_info(dev, " bus name: %s\n", bus->name); + + pci_of_scan_bus(pbm, node, bus); +} + +static void pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus) +{ + struct device_node *child; + const u32 *reg; + int reglen, devfn, prev_devfn; + struct pci_dev *dev; + + if (ofpci_verbose) + pci_info(bus, "scan_bus[%pOF] bus no %d\n", + node, bus->number); + + prev_devfn = -1; + for_each_child_of_node(node, child) { + if (ofpci_verbose) + pci_info(bus, " * %pOF\n", child); + reg = of_get_property(child, "reg", &reglen); + if (reg == NULL || reglen < 20) + continue; + + devfn = (reg[0] >> 8) & 0xff; + + /* This is a workaround for some device trees + * which list PCI devices twice. On the V100 + * for example, device number 3 is listed twice. + * Once as "pm" and once again as "lomp". + */ + if (devfn == prev_devfn) + continue; + prev_devfn = devfn; + + /* create a new pci_dev for this device */ + dev = of_create_pci_dev(pbm, child, bus, devfn); + if (!dev) + continue; + if (ofpci_verbose) + pci_info(dev, "dev header type: %x\n", dev->hdr_type); + + if (pci_is_bridge(dev)) + of_scan_pci_bridge(pbm, child, dev); + } +} + +static ssize_t +show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) +{ + struct pci_dev *pdev; + struct device_node *dp; + + pdev = to_pci_dev(dev); + dp = pdev->dev.of_node; + + return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp); +} + +static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); + +static void pci_bus_register_of_sysfs(struct pci_bus *bus) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + int err; + + list_for_each_entry(dev, &bus->devices, bus_list) { + /* we don't really care if we can create this file or + * not, but we need to assign the result of the call + * or the world will fall under alien invasion and + * everybody will be frozen on a spaceship ready to be + * eaten on alpha centauri by some green and jelly + * humanoid.
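+ * + * (Concretely: sysfs_create_file() is declared __must_check, so assigning the result and then discarding it with the (void) cast below is simply the idiom for deliberately ignoring the return value.)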
+ */ + err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); + (void) err; + } + list_for_each_entry(child_bus, &bus->children, node) + pci_bus_register_of_sysfs(child_bus); +} + +static void pci_claim_legacy_resources(struct pci_dev *dev) +{ + struct pci_bus_region region; + struct resource *p, *root, *conflict; + + if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + return; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return; + + p->name = "Video RAM area"; + p->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + region.start = 0xa0000UL; + region.end = region.start + 0x1ffffUL; + pcibios_bus_to_resource(dev->bus, p, &region); + + root = pci_find_parent_resource(dev, p); + if (!root) { + pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p); + goto err; + } + + conflict = request_resource_conflict(root, p); + if (conflict) { + pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n", + p, conflict->name, conflict); + goto err; + } + + pci_info(dev, "VGA legacy framebuffer %pR\n", p); + return; + +err: + kfree(p); +} + +static void pci_claim_bus_resources(struct pci_bus *bus) +{ + struct pci_bus *child_bus; + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + + if (ofpci_verbose) + pci_info(dev, "Claiming Resource %d: %pR\n", + i, r); + + pci_claim_resource(dev, i); + } + + pci_claim_legacy_resources(dev); + } + + list_for_each_entry(child_bus, &bus->children, node) + pci_claim_bus_resources(child_bus); +} + +struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, + struct device *parent) +{ + LIST_HEAD(resources); + struct device_node *node = pbm->op->dev.of_node; + struct pci_bus *bus; + + printk("PCI: Scanning PBM %pOF\n", node); + + pci_add_resource_offset(&resources, &pbm->io_space, + pbm->io_offset); + pci_add_resource_offset(&resources, &pbm->mem_space, + pbm->mem_offset); + if (pbm->mem64_space.flags) + pci_add_resource_offset(&resources, &pbm->mem64_space, + pbm->mem64_offset); + pbm->busn.start = pbm->pci_first_busno; + pbm->busn.end = pbm->pci_last_busno; + pbm->busn.flags = IORESOURCE_BUS; + pci_add_resource(&resources, &pbm->busn); + bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops, + pbm, &resources); + if (!bus) { + printk(KERN_ERR "Failed to create bus for %pOF\n", node); + pci_free_resource_list(&resources); + return NULL; + } + + pci_of_scan_bus(pbm, node, bus); + pci_bus_register_of_sysfs(bus); + + pci_claim_bus_resources(bus); + + pci_bus_add_devices(bus); + return bus; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<<i))) + continue; + + if (res->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +/* Platform support for /proc/bus/pci/X/Y mmap()s.
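+ * + * Illustrative example (made-up addresses, not from the original file): for an I/O BAR at bus address 0x1000 on a PBM whose io_space starts at 0x7f61000000, pci_iobar_pfn() below biases vm_pgoff by (0x1000 + 0x7f61000000) >> PAGE_SHIFT, so the mmap() is aimed at that controller's own I/O space.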
*/ +int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma) +{ + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; + resource_size_t ioaddr = pci_resource_start(pdev, bar); + + if (!pbm) + return -EINVAL; + + vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT; + + return 0; +} + +#ifdef CONFIG_NUMA +int pcibus_to_node(struct pci_bus *pbus) +{ + struct pci_pbm_info *pbm = pbus->sysdata; + + return pbm->numa_node; +} +EXPORT_SYMBOL(pcibus_to_node); +#endif + +/* Return the domain number for this pci bus */ + +int pci_domain_nr(struct pci_bus *pbus) +{ + struct pci_pbm_info *pbm = pbus->sysdata; + int ret; + + if (!pbm) { + ret = -ENXIO; + } else { + ret = pbm->index; + } + + return ret; +} +EXPORT_SYMBOL(pci_domain_nr); + +#ifdef CONFIG_PCI_MSI +int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) +{ + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; + unsigned int irq; + + if (!pbm->setup_msi_irq) + return -EINVAL; + + return pbm->setup_msi_irq(&irq, pdev, desc); +} + +void arch_teardown_msi_irq(unsigned int irq) +{ + struct msi_desc *entry = irq_get_msi_desc(irq); + struct pci_dev *pdev = msi_desc_to_pci_dev(entry); + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; + + if (pbm->teardown_msi_irq) + pbm->teardown_msi_irq(irq, pdev); +} +#endif /* !(CONFIG_PCI_MSI) */ + +/* ALI sound chips generate 31-bits of DMA, a special register + * determines what bit 31 is emitted as. + */ +int ali_sound_dma_hack(struct device *dev, u64 device_mask) +{ + struct iommu *iommu = dev->archdata.iommu; + struct pci_dev *ali_isa_bridge; + u8 val; + + if (!dev_is_pci(dev)) + return 0; + + if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL || + to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 || + device_mask != 0x7fffffff) + return 0; + + ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, + PCI_DEVICE_ID_AL_M1533, + NULL); + + pci_read_config_byte(ali_isa_bridge, 0x7e, &val); + if (iommu->dma_addr_mask & 0x80000000) + val |= 0x01; + else + val &= ~0x01; + pci_write_config_byte(ali_isa_bridge, 0x7e, val); + pci_dev_put(ali_isa_bridge); + return 1; +} + +void pci_resource_to_user(const struct pci_dev *pdev, int bar, + const struct resource *rp, resource_size_t *start, + resource_size_t *end) +{ + struct pci_bus_region region; + + /* + * "User" addresses are shown in /sys/devices/pci.../.../resource + * and /proc/bus/pci/devices and used as mmap offsets for + * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()). + * + * On sparc, these are PCI bus addresses, i.e., raw BAR values. + */ + pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp); + *start = region.start; + *end = region.end; +} + +void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +#ifdef CONFIG_PCI_IOV +int pcibios_device_add(struct pci_dev *dev) +{ + struct pci_dev *pdev; + + /* Add sriov arch specific initialization here.
+ * Copy dev_archdata from PF to VF + */ + if (dev->is_virtfn) { + struct dev_archdata *psd; + + pdev = dev->physfn; + psd = &pdev->dev.archdata; + pci_init_dev_archdata(&dev->dev.archdata, psd->iommu, + psd->stc, psd->host_controller, NULL, + psd->numa_node); + } + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static int __init pcibios_init(void) +{ + pci_dfl_cache_line_size = 64 >> 2; + return 0; +} +subsys_initcall(pcibios_init); + +#ifdef CONFIG_SYSFS + +#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */ + +static void pcie_bus_slot_names(struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + char name[SLOT_NAME_SIZE]; + struct pci_slot *pci_slot; + const u32 *slot_num; + int len; + + slot_num = of_get_property(pdev->dev.of_node, + "physical-slot#", &len); + + if (slot_num == NULL || len != 4) + continue; + + snprintf(name, sizeof(name), "%u", slot_num[0]); + pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL); + + if (IS_ERR(pci_slot)) + pr_err("PCI: pci_create_slot returned %ld.\n", + PTR_ERR(pci_slot)); + } + + list_for_each_entry(bus, &pbus->children, node) + pcie_bus_slot_names(bus); +} + +static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) +{ + const struct pci_slot_names { + u32 slot_mask; + char names[0]; + } *prop; + const char *sp; + int len, i; + u32 mask; + + prop = of_get_property(node, "slot-names", &len); + if (!prop) + return; + + mask = prop->slot_mask; + sp = prop->names; + + if (ofpci_verbose) + pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n", + node, mask); + + i = 0; + while (mask) { + struct pci_slot *pci_slot; + u32 this_bit = 1 << i; + + if (!(mask & this_bit)) { + i++; + continue; + } + + if (ofpci_verbose) + pci_info(bus, "Making slot [%s]\n", sp); + + pci_slot = pci_create_slot(bus, i, sp, NULL); + if (IS_ERR(pci_slot)) + pci_err(bus, "pci_create_slot returned %ld\n", + PTR_ERR(pci_slot)); + + sp += strlen(sp) + 1; + mask &= ~this_bit; + i++; + } +} + +static int __init of_pci_slot_init(void) +{ + struct pci_bus *pbus = NULL; + + while ((pbus = pci_find_next_bus(pbus)) != NULL) { + struct device_node *node; + struct pci_dev *pdev; + + pdev = list_first_entry(&pbus->devices, struct pci_dev, + bus_list); + + if (pdev && pci_is_pcie(pdev)) { + pcie_bus_slot_names(pbus); + } else { + + if (pbus->self) { + + /* PCI->PCI bridge */ + node = pbus->self->dev.of_node; + + } else { + struct pci_pbm_info *pbm = pbus->sysdata; + + /* Host PCI controller */ + node = pbm->op->dev.of_node; + } + + pci_bus_slot_names(node, pbus); + } + } + + return 0; +} +device_initcall(of_pci_slot_init); +#endif diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c new file mode 100644 index 000000000..4759ccd54 --- /dev/null +++ b/arch/sparc/kernel/pci_common.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_common.c: PCI controller common support. + * + * Copyright (C) 1999, 2007 David S. 
Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "pci_impl.h" +#include "pci_sun4v.h" + +static int config_out_of_range(struct pci_pbm_info *pbm, + unsigned long bus, + unsigned long devfn, + unsigned long reg) +{ + if (bus < pbm->pci_first_busno || + bus > pbm->pci_last_busno) + return 1; + return 0; +} + +static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm, + unsigned long bus, + unsigned long devfn, + unsigned long reg) +{ + unsigned long rbits = pbm->config_space_reg_bits; + + if (config_out_of_range(pbm, bus, devfn, reg)) + return NULL; + + reg = (reg & ((1 << rbits) - 1)); + devfn <<= rbits; + bus <<= rbits + 8; + + return (void *) (pbm->config_space | bus | devfn | reg); +} + +/* At least on Sabre, it is necessary to access all PCI host controller + * registers at their natural size, otherwise zeros are returned. + * Strange but true, and I see no language in the UltraSPARC-IIi + * programmer's manual that mentions this even indirectly. + */ +static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm, + unsigned char bus, unsigned int devfn, + int where, int size, u32 *value) +{ + u32 tmp32, *addr; + u16 tmp16; + u8 tmp8; + + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; + + switch (size) { + case 1: + if (where < 8) { + unsigned long align = (unsigned long) addr; + + align &= ~1; + pci_config_read16((u16 *)align, &tmp16); + if (where & 1) + *value = tmp16 >> 8; + else + *value = tmp16 & 0xff; + } else { + pci_config_read8((u8 *)addr, &tmp8); + *value = (u32) tmp8; + } + break; + + case 2: + if (where < 8) { + pci_config_read16((u16 *)addr, &tmp16); + *value = (u32) tmp16; + } else { + pci_config_read8((u8 *)addr, &tmp8); + *value = (u32) tmp8; + pci_config_read8(((u8 *)addr) + 1, &tmp8); + *value |= ((u32) tmp8) << 8; + } + break; + + case 4: + tmp32 = 0xffffffff; + sun4u_read_pci_cfg_host(pbm, bus, devfn, + where, 2, &tmp32); + *value = tmp32; + + tmp32 = 0xffffffff; + sun4u_read_pci_cfg_host(pbm, bus, devfn, + where + 2, 2, &tmp32); + *value |= tmp32 << 16; + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 *value) +{ + struct pci_pbm_info *pbm = bus_dev->sysdata; + unsigned char bus = bus_dev->number; + u32 *addr; + u16 tmp16; + u8 tmp8; + + switch (size) { + case 1: + *value = 0xff; + break; + case 2: + *value = 0xffff; + break; + case 4: + *value = 0xffffffff; + break; + } + + if (!bus_dev->number && !PCI_SLOT(devfn)) + return sun4u_read_pci_cfg_host(pbm, bus, devfn, where, + size, value); + + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; + + switch (size) { + case 1: + pci_config_read8((u8 *)addr, &tmp8); + *value = (u32) tmp8; + break; + + case 2: + if (where & 0x01) { + printk("pci_read_config_word: misaligned reg [%x]\n", + where); + return PCIBIOS_SUCCESSFUL; + } + pci_config_read16((u16 *)addr, &tmp16); + *value = (u32) tmp16; + break; + + case 4: + if (where & 0x03) { + printk("pci_read_config_dword: misaligned reg [%x]\n", + where); + return PCIBIOS_SUCCESSFUL; + } + pci_config_read32(addr, value); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm, + unsigned char bus, unsigned int devfn, + int where, int size, u32 value) +{ + u32 *addr; + + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; + + 
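+ /* As in sun4u_read_pci_cfg_host() above, the first 8 bytes of host controller config space only respond to naturally sized accesses, so the byte case below performs a 16-bit read-modify-write on the aligned halfword -- e.g. (illustrative) writing 0xab at offset 1 reads the u16 at offset 0, merges in 0xab << 8 and stores the halfword back. */ +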
switch (size) { + case 1: + if (where < 8) { + unsigned long align = (unsigned long) addr; + u16 tmp16; + + align &= ~1; + pci_config_read16((u16 *)align, &tmp16); + if (where & 1) { + tmp16 &= 0x00ff; + tmp16 |= value << 8; + } else { + tmp16 &= 0xff00; + tmp16 |= value; + } + pci_config_write16((u16 *)align, tmp16); + } else + pci_config_write8((u8 *)addr, value); + break; + case 2: + if (where < 8) { + pci_config_write16((u16 *)addr, value); + } else { + pci_config_write8((u8 *)addr, value & 0xff); + pci_config_write8(((u8 *)addr) + 1, value >> 8); + } + break; + case 4: + sun4u_write_pci_cfg_host(pbm, bus, devfn, + where, 2, value & 0xffff); + sun4u_write_pci_cfg_host(pbm, bus, devfn, + where + 2, 2, value >> 16); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 value) +{ + struct pci_pbm_info *pbm = bus_dev->sysdata; + unsigned char bus = bus_dev->number; + u32 *addr; + + if (!bus_dev->number && !PCI_SLOT(devfn)) + return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, + size, value); + + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; + + switch (size) { + case 1: + pci_config_write8((u8 *)addr, value); + break; + + case 2: + if (where & 0x01) { + printk("pci_write_config_word: misaligned reg [%x]\n", + where); + return PCIBIOS_SUCCESSFUL; + } + pci_config_write16((u16 *)addr, value); + break; + + case 4: + if (where & 0x03) { + printk("pci_write_config_dword: misaligned reg [%x]\n", + where); + return PCIBIOS_SUCCESSFUL; + } + pci_config_write32(addr, value); + } + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops sun4u_pci_ops = { + .read = sun4u_read_pci_cfg, + .write = sun4u_write_pci_cfg, +}; + +static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 *value) +{ + struct pci_pbm_info *pbm = bus_dev->sysdata; + u32 devhandle = pbm->devhandle; + unsigned int bus = bus_dev->number; + unsigned int device = PCI_SLOT(devfn); + unsigned int func = PCI_FUNC(devfn); + unsigned long ret; + + if (config_out_of_range(pbm, bus, devfn, where)) { + ret = ~0UL; + } else { + ret = pci_sun4v_config_get(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size); + } + switch (size) { + case 1: + *value = ret & 0xff; + break; + case 2: + *value = ret & 0xffff; + break; + case 4: + *value = ret & 0xffffffff; + break; + } + + + return PCIBIOS_SUCCESSFUL; +} + +static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 value) +{ + struct pci_pbm_info *pbm = bus_dev->sysdata; + u32 devhandle = pbm->devhandle; + unsigned int bus = bus_dev->number; + unsigned int device = PCI_SLOT(devfn); + unsigned int func = PCI_FUNC(devfn); + + if (config_out_of_range(pbm, bus, devfn, where)) { + /* Do nothing. */ + } else { + /* We don't check for hypervisor errors here, but perhaps + * we should and influence our return value depending upon + * what kind of error is thrown. 
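+ * + * (The read side already gets this for free: on an out-of-range access sun4v_read_pci_cfg() above starts from ~0UL and masks it down to 0xff/0xffff/0xffffffff, which is exactly what a master abort returns on a conventional config read.)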
+ */ + pci_sun4v_config_put(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value); + } + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops sun4v_pci_ops = { + .read = sun4v_read_pci_cfg, + .write = sun4v_write_pci_cfg, +}; + +void pci_get_pbm_props(struct pci_pbm_info *pbm) +{ + const u32 *val = of_get_property(pbm->op->dev.of_node, "bus-range", NULL); + + pbm->pci_first_busno = val[0]; + pbm->pci_last_busno = val[1]; + + val = of_get_property(pbm->op->dev.of_node, "ino-bitmap", NULL); + if (val) { + pbm->ino_bitmap = (((u64)val[1] << 32UL) | + ((u64)val[0] << 0UL)); + } +} + +static void pci_register_iommu_region(struct pci_pbm_info *pbm) +{ + const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", + NULL); + + if (vdma) { + struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); + + if (!rp) { + pr_info("%s: Cannot allocate IOMMU resource.\n", + pbm->name); + return; + } + rp->name = "IOMMU"; + rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; + rp->end = rp->start + (unsigned long) vdma[1] - 1UL; + rp->flags = IORESOURCE_BUSY; + if (request_resource(&pbm->mem_space, rp)) { + pr_info("%s: Unable to request IOMMU resource.\n", + pbm->name); + kfree(rp); + } + } +} + +void pci_determine_mem_io_space(struct pci_pbm_info *pbm) +{ + const struct linux_prom_pci_ranges *pbm_ranges; + int i, saw_mem, saw_io; + int num_pbm_ranges; + + /* Corresponding generic code in of_pci_get_host_bridge_resources() */ + + saw_mem = saw_io = 0; + pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i); + if (!pbm_ranges) { + prom_printf("PCI: Fatal error, missing PBM ranges property " + " for %s\n", + pbm->name); + prom_halt(); + } + + num_pbm_ranges = i / sizeof(*pbm_ranges); + memset(&pbm->mem64_space, 0, sizeof(struct resource)); + + for (i = 0; i < num_pbm_ranges; i++) { + const struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; + unsigned long a, size, region_a; + u32 parent_phys_hi, parent_phys_lo; + u32 child_phys_mid, child_phys_lo; + u32 size_hi, size_lo; + int type; + + parent_phys_hi = pr->parent_phys_hi; + parent_phys_lo = pr->parent_phys_lo; + child_phys_mid = pr->child_phys_mid; + child_phys_lo = pr->child_phys_lo; + if (tlb_type == hypervisor) + parent_phys_hi &= 0x0fffffff; + + size_hi = pr->size_hi; + size_lo = pr->size_lo; + + type = (pr->child_phys_hi >> 24) & 0x3; + a = (((unsigned long)parent_phys_hi << 32UL) | + ((unsigned long)parent_phys_lo << 0UL)); + region_a = (((unsigned long)child_phys_mid << 32UL) | + ((unsigned long)child_phys_lo << 0UL)); + size = (((unsigned long)size_hi << 32UL) | + ((unsigned long)size_lo << 0UL)); + + switch (type) { + case 0: + /* PCI config space, 16MB */ + pbm->config_space = a; + break; + + case 1: + /* 16-bit IO space, 16MB */ + pbm->io_space.start = a; + pbm->io_space.end = a + size - 1UL; + pbm->io_space.flags = IORESOURCE_IO; + pbm->io_offset = a - region_a; + saw_io = 1; + break; + + case 2: + /* 32-bit MEM space, 2GB */ + pbm->mem_space.start = a; + pbm->mem_space.end = a + size - 1UL; + pbm->mem_space.flags = IORESOURCE_MEM; + pbm->mem_offset = a - region_a; + saw_mem = 1; + break; + + case 3: + /* 64-bit MEM handling */ + pbm->mem64_space.start = a; + pbm->mem64_space.end = a + size - 1UL; + pbm->mem64_space.flags = IORESOURCE_MEM; + pbm->mem64_offset = a - region_a; + saw_mem = 1; + break; + + default: + break; + } + } + + if (!saw_io || !saw_mem) { + prom_printf("%s: Fatal error, missing %s PBM range.\n", + pbm->name, + (!saw_io ? 
"IO" : "MEM")); + prom_halt(); + } + + if (pbm->io_space.flags) + printk("%s: PCI IO %pR offset %llx\n", + pbm->name, &pbm->io_space, pbm->io_offset); + if (pbm->mem_space.flags) + printk("%s: PCI MEM %pR offset %llx\n", + pbm->name, &pbm->mem_space, pbm->mem_offset); + if (pbm->mem64_space.flags && pbm->mem_space.flags) { + if (pbm->mem64_space.start <= pbm->mem_space.end) + pbm->mem64_space.start = pbm->mem_space.end + 1; + if (pbm->mem64_space.start > pbm->mem64_space.end) + pbm->mem64_space.flags = 0; + } + + if (pbm->mem64_space.flags) + printk("%s: PCI MEM64 %pR offset %llx\n", + pbm->name, &pbm->mem64_space, pbm->mem64_offset); + + pbm->io_space.name = pbm->mem_space.name = pbm->name; + pbm->mem64_space.name = pbm->name; + + request_resource(&ioport_resource, &pbm->io_space); + request_resource(&iomem_resource, &pbm->mem_space); + if (pbm->mem64_space.flags) + request_resource(&iomem_resource, &pbm->mem64_space); + + pci_register_iommu_region(pbm); +} + +/* Generic helper routines for PCI error reporting. */ +void pci_scan_for_target_abort(struct pci_pbm_info *pbm, + struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + u16 status, error_bits; + + pci_read_config_word(pdev, PCI_STATUS, &status); + error_bits = + (status & (PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT)); + if (error_bits) { + pci_write_config_word(pdev, PCI_STATUS, error_bits); + pci_info(pdev, "%s: Device saw Target Abort [%016x]\n", + pbm->name, status); + } + } + + list_for_each_entry(bus, &pbus->children, node) + pci_scan_for_target_abort(pbm, bus); +} + +void pci_scan_for_master_abort(struct pci_pbm_info *pbm, + struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + u16 status, error_bits; + + pci_read_config_word(pdev, PCI_STATUS, &status); + error_bits = + (status & (PCI_STATUS_REC_MASTER_ABORT)); + if (error_bits) { + pci_write_config_word(pdev, PCI_STATUS, error_bits); + pci_info(pdev, "%s: Device received Master Abort " + "[%016x]\n", pbm->name, status); + } + } + + list_for_each_entry(bus, &pbus->children, node) + pci_scan_for_master_abort(pbm, bus); +} + +void pci_scan_for_parity_error(struct pci_pbm_info *pbm, + struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + u16 status, error_bits; + + pci_read_config_word(pdev, PCI_STATUS, &status); + error_bits = + (status & (PCI_STATUS_PARITY | + PCI_STATUS_DETECTED_PARITY)); + if (error_bits) { + pci_write_config_word(pdev, PCI_STATUS, error_bits); + pci_info(pdev, "%s: Device saw Parity Error [%016x]\n", + pbm->name, status); + } + } + + list_for_each_entry(bus, &pbus->children, node) + pci_scan_for_parity_error(pbm, bus); +} diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c new file mode 100644 index 000000000..0ca08d455 --- /dev/null +++ b/arch/sparc/kernel/pci_fire.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_fire.c: Sun4u platform PCI-E controller support. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "pci_impl.h" + +#define DRIVER_NAME "fire" +#define PFX DRIVER_NAME ": " + +#define FIRE_IOMMU_CONTROL 0x40000UL +#define FIRE_IOMMU_TSBBASE 0x40008UL +#define FIRE_IOMMU_FLUSH 0x40100UL +#define FIRE_IOMMU_FLUSHINV 0x40108UL + +static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm) +{ + struct iommu *iommu = pbm->iommu; + u32 vdma[2], dma_mask; + u64 control; + int tsbsize, err; + + /* No virtual-dma property on these guys, use largest size. */ + vdma[0] = 0xc0000000; /* base */ + vdma[1] = 0x40000000; /* size */ + dma_mask = 0xffffffff; + tsbsize = 128; + + /* Register addresses. */ + iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL; + iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE; + iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH; + iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV; + + /* We use the main control/status register of FIRE as the write + * completion register. + */ + iommu->write_complete_reg = pbm->controller_regs + 0x410000UL; + + /* + * Invalidate TLB Entries. + */ + upa_writeq(~(u64)0, iommu->iommu_flushinv); + + err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask, + pbm->numa_node); + if (err) + return err; + + upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase); + + control = upa_readq(iommu->iommu_control); + control |= (0x00000400 /* TSB cache snoop enable */ | + 0x00000300 /* Cache mode */ | + 0x00000002 /* Bypass enable */ | + 0x00000001 /* Translation enable */); + upa_writeq(control, iommu->iommu_control); + + return 0; +} + +#ifdef CONFIG_PCI_MSI +struct pci_msiq_entry { + u64 word0; +#define MSIQ_WORD0_RESV 0x8000000000000000UL +#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL +#define MSIQ_WORD0_FMT_TYPE_SHIFT 56 +#define MSIQ_WORD0_LEN 0x00ffc00000000000UL +#define MSIQ_WORD0_LEN_SHIFT 46 +#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL +#define MSIQ_WORD0_ADDR0_SHIFT 32 +#define MSIQ_WORD0_RID 0x00000000ffff0000UL +#define MSIQ_WORD0_RID_SHIFT 16 +#define MSIQ_WORD0_DATA0 0x000000000000ffffUL +#define MSIQ_WORD0_DATA0_SHIFT 0 + +#define MSIQ_TYPE_MSG 0x6 +#define MSIQ_TYPE_MSI32 0xb +#define MSIQ_TYPE_MSI64 0xf + + u64 word1; +#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL +#define MSIQ_WORD1_ADDR1_SHIFT 16 +#define MSIQ_WORD1_DATA1 0x000000000000ffffUL +#define MSIQ_WORD1_DATA1_SHIFT 0 + + u64 resv[6]; +}; + +/* All MSI registers are offset from pbm->pbm_regs */ +#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL +#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL + +#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL) +#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL +#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL + +#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL) +#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL +#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL +#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL + +#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL) +#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL +#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL +#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL +#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL + +#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL) +#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL +#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL 
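+ +/* Annotation (not in the original file): HEAD and TAIL hold 7-bit ring indices (mask 0x7f), i.e. up to 128 event queue entries; pci_fire_dequeue_msi() below advances *head and wraps it at pbm->msiq_ent_count before pci_fire_set_head() writes it back through the HEAD register. */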
+ +#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL) +#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL + +#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL) +#define MSI_MAP_VALID 0x8000000000000000UL +#define MSI_MAP_EQWR_N 0x4000000000000000UL +#define MSI_MAP_EQNUM 0x000000000000003fUL + +#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL) +#define MSI_CLEAR_EQWR_N 0x4000000000000000UL + +#define IMONDO_DATA0 0x02C000UL +#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL + +#define IMONDO_DATA1 0x02C008UL +#define IMONDO_DATA1_DATA 0xffffffffffffffffUL + +#define MSI_32BIT_ADDR 0x034000UL +#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL + +#define MSI_64BIT_ADDR 0x034008UL +#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL + +static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long *head) +{ + *head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid)); + return 0; +} + +static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long *head, unsigned long *msi) +{ + unsigned long type_fmt, type, msi_num; + struct pci_msiq_entry *base, *ep; + + base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192)); + ep = &base[*head]; + + if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0) + return 0; + + type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >> + MSIQ_WORD0_FMT_TYPE_SHIFT); + type = (type_fmt >> 3); + if (unlikely(type != MSIQ_TYPE_MSI32 && + type != MSIQ_TYPE_MSI64)) + return -EINVAL; + + *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >> + MSIQ_WORD0_DATA0_SHIFT); + + upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num)); + + /* Clear the entry. */ + ep->word0 &= ~MSIQ_WORD0_FMT_TYPE; + + /* Go to next entry in ring. */ + (*head)++; + if (*head >= pbm->msiq_ent_count) + *head = 0; + + return 1; +} + +static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long head) +{ + upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid)); + return 0; +} + +static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long msi, int is_msi64) +{ + u64 val; + + val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); + val &= ~(MSI_MAP_EQNUM); + val |= msiqid; + upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi)); + + upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi)); + + val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); + val |= MSI_MAP_VALID; + upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi)); + + return 0; +} + +static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) +{ + u64 val; + + val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); + + val &= ~MSI_MAP_VALID; + + upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi)); + + return 0; +} + +static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm) +{ + unsigned long pages, order, i; + + order = get_order(512 * 1024); + pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); + if (pages == 0UL) { + printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n", + order); + return -ENOMEM; + } + memset((char *)pages, 0, PAGE_SIZE << order); + pbm->msi_queues = (void *) pages; + + upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES | + __pa(pbm->msi_queues)), + pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG); + + upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0); + upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1); + + upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR); + upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR); + + for (i = 0; i < pbm->msiq_num; i++) { + upa_writeq(0, pbm->pbm_regs + 
EVENT_QUEUE_HEAD(i)); + upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i)); + } + + return 0; +} + +static void pci_fire_msiq_free(struct pci_pbm_info *pbm) +{ + unsigned long pages, order; + + order = get_order(512 * 1024); + pages = (unsigned long) pbm->msi_queues; + + free_pages(pages, order); + + pbm->msi_queues = NULL; +} + +static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm, + unsigned long msiqid, + unsigned long devino) +{ + unsigned long cregs = (unsigned long) pbm->pbm_regs; + unsigned long imap_reg, iclr_reg, int_ctrlr; + unsigned int irq; + int fixup; + u64 val; + + imap_reg = cregs + (0x001000UL + (devino * 0x08UL)); + iclr_reg = cregs + (0x001400UL + (devino * 0x08UL)); + + /* XXX iterate amongst the 4 IRQ controllers XXX */ + int_ctrlr = (1UL << 6); + + val = upa_readq(imap_reg); + val |= (1UL << 63) | int_ctrlr; + upa_writeq(val, imap_reg); + + fixup = ((pbm->portid << 6) | devino) - int_ctrlr; + + irq = build_irq(fixup, iclr_reg, imap_reg); + if (!irq) + return -ENOMEM; + + upa_writeq(EVENT_QUEUE_CONTROL_SET_EN, + pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid)); + + return irq; +} + +static const struct sparc64_msiq_ops pci_fire_msiq_ops = { + .get_head = pci_fire_get_head, + .dequeue_msi = pci_fire_dequeue_msi, + .set_head = pci_fire_set_head, + .msi_setup = pci_fire_msi_setup, + .msi_teardown = pci_fire_msi_teardown, + .msiq_alloc = pci_fire_msiq_alloc, + .msiq_free = pci_fire_msiq_free, + .msiq_build_irq = pci_fire_msiq_build_irq, +}; + +static void pci_fire_msi_init(struct pci_pbm_info *pbm) +{ + sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops); +} +#else /* CONFIG_PCI_MSI */ +static void pci_fire_msi_init(struct pci_pbm_info *pbm) +{ +} +#endif /* !(CONFIG_PCI_MSI) */ + +/* Based at pbm->controller_regs */ +#define FIRE_PARITY_CONTROL 0x470010UL +#define FIRE_PARITY_ENAB 0x8000000000000000UL +#define FIRE_FATAL_RESET_CTL 0x471028UL +#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL +#define FIRE_FATAL_RESET_MB 0x0000000002000000UL +#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL +#define FIRE_FATAL_RESET_APE 0x0000000000004000UL +#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL +#define FIRE_FATAL_RESET_JW 0x0000000000000004UL +#define FIRE_FATAL_RESET_JI 0x0000000000000002UL +#define FIRE_FATAL_RESET_JR 0x0000000000000001UL +#define FIRE_CORE_INTR_ENABLE 0x471800UL + +/* Based at pbm->pbm_regs */ +#define FIRE_TLU_CTRL 0x80000UL +#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL +#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL +#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL +#define FIRE_TLU_DEV_CTRL 0x90008UL +#define FIRE_TLU_LINK_CTRL 0x90020UL +#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL +#define FIRE_LPU_RESET 0xe2008UL +#define FIRE_LPU_LLCFG 0xe2200UL +#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL +#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL +#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL +#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL +#define FIRE_LPU_TXL_FIFOP 0xe2430UL +#define FIRE_LPU_LTSSM_CFG2 0xe2788UL +#define FIRE_LPU_LTSSM_CFG3 0xe2790UL +#define FIRE_LPU_LTSSM_CFG4 0xe2798UL +#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL +#define FIRE_DMC_IENAB 0x31800UL +#define FIRE_DMC_DBG_SEL_A 0x53000UL +#define FIRE_DMC_DBG_SEL_B 0x53008UL +#define FIRE_PEC_IENAB 0x51800UL + +static void pci_fire_hw_init(struct pci_pbm_info *pbm) +{ + u64 val; + + upa_writeq(FIRE_PARITY_ENAB, + pbm->controller_regs + FIRE_PARITY_CONTROL); + + upa_writeq((FIRE_FATAL_RESET_SPARE | + FIRE_FATAL_RESET_MB | + FIRE_FATAL_RESET_CPE | + FIRE_FATAL_RESET_APE | + 
FIRE_FATAL_RESET_PIO | + FIRE_FATAL_RESET_JW | + FIRE_FATAL_RESET_JI | + FIRE_FATAL_RESET_JR), + pbm->controller_regs + FIRE_FATAL_RESET_CTL); + + upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE); + + val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL); + val |= (FIRE_TLU_CTRL_TIM | + FIRE_TLU_CTRL_QDET | + FIRE_TLU_CTRL_CFG); + upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL); + upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL); + upa_writeq(FIRE_TLU_LINK_CTRL_CLK, + pbm->pbm_regs + FIRE_TLU_LINK_CTRL); + + upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET); + upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG); + upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P), + pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL); + upa_writeq(((0xffff << 16) | (0x0000 << 0)), + pbm->pbm_regs + FIRE_LPU_TXL_FIFOP); + upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2); + upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3); + upa_writeq((2 << 16) | (140 << 8), + pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4); + upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5); + + upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB); + upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A); + upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B); + + upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB); +} + +static int pci_fire_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 portid) +{ + const struct linux_prom64_registers *regs; + struct device_node *dp = op->dev.of_node; + int err; + + pbm->numa_node = NUMA_NO_NODE; + + pbm->pci_ops = &sun4u_pci_ops; + pbm->config_space_reg_bits = 12; + + pbm->index = pci_num_pbms++; + + pbm->portid = portid; + pbm->op = op; + pbm->name = dp->full_name; + + regs = of_get_property(dp, "reg", NULL); + pbm->pbm_regs = regs[0].phys_addr; + pbm->controller_regs = regs[1].phys_addr - 0x410000UL; + + printk("%s: SUN4U PCIE Bus Module\n", pbm->name); + + pci_determine_mem_io_space(pbm); + + pci_get_pbm_props(pbm); + + pci_fire_hw_init(pbm); + + err = pci_fire_pbm_iommu_init(pbm); + if (err) + return err; + + pci_fire_msi_init(pbm); + + pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev); + + /* XXX register error interrupt handlers XXX */ + + pbm->next = pci_pbm_root; + pci_pbm_root = pbm; + + return 0; +} + +static int fire_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + struct pci_pbm_info *pbm; + struct iommu *iommu; + u32 portid; + int err; + + portid = of_getintprop_default(dp, "portid", 0xff); + + err = -ENOMEM; + pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); + if (!pbm) { + printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n"); + goto out_err; + } + + iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); + if (!iommu) { + printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n"); + goto out_free_controller; + } + + pbm->iommu = iommu; + + err = pci_fire_pbm_init(pbm, op, portid); + if (err) + goto out_free_iommu; + + dev_set_drvdata(&op->dev, pbm); + + return 0; + +out_free_iommu: + kfree(pbm->iommu); + +out_free_controller: + kfree(pbm); + +out_err: + return err; +} + +static const struct of_device_id fire_match[] = { + { + .name = "pci", + .compatible = "pciex108e,80f0", + }, + {}, +}; + +static struct platform_driver fire_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = fire_match, + }, + .probe = fire_probe, +}; + +static int __init fire_init(void) +{ + return platform_driver_register(&fire_driver); +} + +subsys_initcall(fire_init); diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h new file mode 
100644 index 000000000..4e3d15189 --- /dev/null +++ b/arch/sparc/kernel/pci_impl.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* pci_impl.h: Helper definitions for PCI controller support. + * + * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) + */ + +#ifndef PCI_IMPL_H +#define PCI_IMPL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* The abstraction used here is that there are PCI controllers, + * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules + * underneath. Each PCI bus module uses an IOMMU (shared by both + * PBMs of a controller, or per-PBM), and if a streaming buffer + * is present, each PCI bus module has its own. (i.e. the IOMMU + * might be shared between PBMs, the STC is never shared) + * Furthermore, each PCI bus module controls its own autonomous + * PCI bus. + */ + +#define PCI_STC_FLUSHFLAG_INIT(STC) \ + (*((STC)->strbuf_flushflag) = 0UL) +#define PCI_STC_FLUSHFLAG_SET(STC) \ + (*((STC)->strbuf_flushflag) != 0UL) + +#ifdef CONFIG_PCI_MSI +struct pci_pbm_info; +struct sparc64_msiq_ops { + int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long *head); + int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long *head, unsigned long *msi); + int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long head); + int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long msi, int is_msi64); + int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi); + int (*msiq_alloc)(struct pci_pbm_info *pbm); + void (*msiq_free)(struct pci_pbm_info *pbm); + int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long devino); +}; + +void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, + const struct sparc64_msiq_ops *ops); + +struct sparc64_msiq_cookie { + struct pci_pbm_info *pbm; + unsigned long msiqid; +}; +#endif + +struct pci_pbm_info { + struct pci_pbm_info *next; + struct pci_pbm_info *sibling; + int index; + + /* Physical address base of controller registers. */ + unsigned long controller_regs; + + /* Physical address base of PBM registers. */ + unsigned long pbm_regs; + + /* Physical address of DMA sync register, if any. */ + unsigned long sync_reg; + + /* Opaque 32-bit system bus Port ID. */ + u32 portid; + + /* Opaque 32-bit handle used for hypervisor calls. */ + u32 devhandle; + + /* Chipset version information. */ + int chip_type; +#define PBM_CHIP_TYPE_SABRE 1 +#define PBM_CHIP_TYPE_PSYCHO 2 +#define PBM_CHIP_TYPE_SCHIZO 3 +#define PBM_CHIP_TYPE_SCHIZO_PLUS 4 +#define PBM_CHIP_TYPE_TOMATILLO 5 + int chip_version; + int chip_revision; + + /* Name used for top-level resources. */ + const char *name; + + /* OBP specific information. */ + struct platform_device *op; + u64 ino_bitmap; + + /* PBM I/O and Memory space resources. */ + struct resource io_space; + struct resource mem_space; + struct resource mem64_space; + struct resource busn; + /* offset */ + resource_size_t io_offset; + resource_size_t mem_offset; + resource_size_t mem64_offset; + + /* Base of PCI Config space, can be per-PBM or shared. */ + unsigned long config_space; + + /* This will be 12 on PCI-E controllers, 8 elsewhere. */ + unsigned long config_space_reg_bits; + + unsigned long pci_afsr; + unsigned long pci_afar; + unsigned long pci_csr; + + /* State of 66MHz capabilities on this PBM. */ + int is_66mhz_capable; + int all_devs_66mhz; + +#ifdef CONFIG_PCI_MSI + /* MSI info.
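+ * + * (Overview, not in the original header: the msiq_* fields below describe the per-controller event queues serviced by sparc64_msiq_interrupt() in pci_msi.c, while the msi_* fields track the flat MSI number space: msi_bitmap records numbers handed out by alloc_msi() and msi_irq_table maps each MSI back to its Linux irq.)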
*/ + u32 msiq_num; + u32 msiq_ent_count; + u32 msiq_first; + u32 msiq_first_devino; + u32 msiq_rotor; + struct sparc64_msiq_cookie *msiq_irq_cookies; + u32 msi_num; + u32 msi_first; + u32 msi_data_mask; + u32 msix_data_width; + u64 msi32_start; + u64 msi64_start; + u32 msi32_len; + u32 msi64_len; + void *msi_queues; + unsigned long *msi_bitmap; + unsigned int *msi_irq_table; + int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev, + struct msi_desc *entry); + void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev); + const struct sparc64_msiq_ops *msi_ops; +#endif /* !(CONFIG_PCI_MSI) */ + + /* This PBM's streaming buffer. */ + struct strbuf stc; + + /* IOMMU state, potentially shared by both PBM segments. */ + struct iommu *iommu; + + /* Now things for the actual PCI bus probes. */ + unsigned int pci_first_busno; + unsigned int pci_last_busno; + struct pci_bus *pci_bus; + struct pci_ops *pci_ops; + + int numa_node; +}; + +extern struct pci_pbm_info *pci_pbm_root; + +extern int pci_num_pbms; + +/* PCI bus scanning and fixup support. */ +void pci_get_pbm_props(struct pci_pbm_info *pbm); +struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, + struct device *parent); +void pci_determine_mem_io_space(struct pci_pbm_info *pbm); + +/* Error reporting support. */ +void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *); +void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *); +void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *); + +/* Configuration space access. */ +void pci_config_read8(u8 *addr, u8 *ret); +void pci_config_read16(u16 *addr, u16 *ret); +void pci_config_read32(u32 *addr, u32 *ret); +void pci_config_write8(u8 *addr, u8 val); +void pci_config_write16(u16 *addr, u16 val); +void pci_config_write32(u32 *addr, u32 val); + +extern struct pci_ops sun4u_pci_ops; +extern struct pci_ops sun4v_pci_ops; + +extern volatile int pci_poke_in_progress; +extern volatile int pci_poke_cpu; +extern volatile int pci_poke_faulted; + +#endif /* !(PCI_IMPL_H) */ diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c new file mode 100644 index 000000000..9ed119857 --- /dev/null +++ b/arch/sparc/kernel/pci_msi.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_msi.c: Sparc64 MSI support common layer. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ +#include +#include +#include +#include + +#include "pci_impl.h" + +static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie) +{ + struct sparc64_msiq_cookie *msiq_cookie = cookie; + struct pci_pbm_info *pbm = msiq_cookie->pbm; + unsigned long msiqid = msiq_cookie->msiqid; + const struct sparc64_msiq_ops *ops; + unsigned long orig_head, head; + int err; + + ops = pbm->msi_ops; + + err = ops->get_head(pbm, msiqid, &head); + if (unlikely(err < 0)) + goto err_get_head; + + orig_head = head; + for (;;) { + unsigned long msi; + + err = ops->dequeue_msi(pbm, msiqid, &head, &msi); + if (likely(err > 0)) { + unsigned int irq; + + irq = pbm->msi_irq_table[msi - pbm->msi_first]; + generic_handle_irq(irq); + } + + if (unlikely(err < 0)) + goto err_dequeue; + + if (err == 0) + break; + } + if (likely(head != orig_head)) { + err = ops->set_head(pbm, msiqid, head); + if (unlikely(err < 0)) + goto err_set_head; + } + return IRQ_HANDLED; + +err_get_head: + printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n", + msiqid, err); + goto err_out; + +err_dequeue: + printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] " + "gives error %d\n", + head, msiqid, err); + goto err_out; + +err_set_head: + printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] " + "gives error %d\n", + head, msiqid, err); + goto err_out; + +err_out: + return IRQ_NONE; +} + +static u32 pick_msiq(struct pci_pbm_info *pbm) +{ + static DEFINE_SPINLOCK(rotor_lock); + unsigned long flags; + u32 ret, rotor; + + spin_lock_irqsave(&rotor_lock, flags); + + rotor = pbm->msiq_rotor; + ret = pbm->msiq_first + rotor; + + if (++rotor >= pbm->msiq_num) + rotor = 0; + pbm->msiq_rotor = rotor; + + spin_unlock_irqrestore(&rotor_lock, flags); + + return ret; +} + + +static int alloc_msi(struct pci_pbm_info *pbm) +{ + int i; + + for (i = 0; i < pbm->msi_num; i++) { + if (!test_and_set_bit(i, pbm->msi_bitmap)) + return i + pbm->msi_first; + } + + return -ENOENT; +} + +static void free_msi(struct pci_pbm_info *pbm, int msi_num) +{ + msi_num -= pbm->msi_first; + clear_bit(msi_num, pbm->msi_bitmap); +} + +static struct irq_chip msi_irq = { + .name = "PCI-MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + /* XXX affinity XXX */ +}; + +static int sparc64_setup_msi_irq(unsigned int *irq_p, + struct pci_dev *pdev, + struct msi_desc *entry) +{ + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; + const struct sparc64_msiq_ops *ops = pbm->msi_ops; + struct msi_msg msg; + int msi, err; + u32 msiqid; + + *irq_p = irq_alloc(0, 0); + err = -ENOMEM; + if (!*irq_p) + goto out_err; + + irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq, + "MSI"); + + err = alloc_msi(pbm); + if (unlikely(err < 0)) + goto out_irq_free; + + msi = err; + + msiqid = pick_msiq(pbm); + + err = ops->msi_setup(pbm, msiqid, msi, + (entry->pci.msi_attrib.is_64 ? 
1 : 0)); + if (err) + goto out_msi_free; + + pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p; + + if (entry->pci.msi_attrib.is_64) { + msg.address_hi = pbm->msi64_start >> 32; + msg.address_lo = pbm->msi64_start & 0xffffffff; + } else { + msg.address_hi = 0; + msg.address_lo = pbm->msi32_start; + } + msg.data = msi; + + irq_set_msi_desc(*irq_p, entry); + pci_write_msi_msg(*irq_p, &msg); + + return 0; + +out_msi_free: + free_msi(pbm, msi); + +out_irq_free: + irq_set_chip(*irq_p, NULL); + irq_free(*irq_p); + *irq_p = 0; + +out_err: + return err; +} + +static void sparc64_teardown_msi_irq(unsigned int irq, + struct pci_dev *pdev) +{ + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; + const struct sparc64_msiq_ops *ops = pbm->msi_ops; + unsigned int msi_num; + int i, err; + + for (i = 0; i < pbm->msi_num; i++) { + if (pbm->msi_irq_table[i] == irq) + break; + } + if (i >= pbm->msi_num) { + pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name, + irq); + return; + } + + msi_num = pbm->msi_first + i; + pbm->msi_irq_table[i] = ~0U; + + err = ops->msi_teardown(pbm, msi_num); + if (err) { + pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, " + "irq %u, gives error %d\n", pbm->name, msi_num, irq, + err); + return; + } + + free_msi(pbm, msi_num); + + irq_set_chip(irq, NULL); + irq_free(irq); +} + +static int msi_bitmap_alloc(struct pci_pbm_info *pbm) +{ + unsigned long size, bits_per_ulong; + + bits_per_ulong = sizeof(unsigned long) * 8; + size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); + size /= 8; + BUG_ON(size % sizeof(unsigned long)); + + pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); + if (!pbm->msi_bitmap) + return -ENOMEM; + + return 0; +} + +static void msi_bitmap_free(struct pci_pbm_info *pbm) +{ + kfree(pbm->msi_bitmap); + pbm->msi_bitmap = NULL; +} + +static int msi_table_alloc(struct pci_pbm_info *pbm) +{ + int size, i; + + size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie); + pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL); + if (!pbm->msiq_irq_cookies) + return -ENOMEM; + + for (i = 0; i < pbm->msiq_num; i++) { + struct sparc64_msiq_cookie *p; + + p = &pbm->msiq_irq_cookies[i]; + p->pbm = pbm; + p->msiqid = pbm->msiq_first + i; + } + + size = pbm->msi_num * sizeof(unsigned int); + pbm->msi_irq_table = kzalloc(size, GFP_KERNEL); + if (!pbm->msi_irq_table) { + kfree(pbm->msiq_irq_cookies); + pbm->msiq_irq_cookies = NULL; + return -ENOMEM; + } + + return 0; +} + +static void msi_table_free(struct pci_pbm_info *pbm) +{ + kfree(pbm->msiq_irq_cookies); + pbm->msiq_irq_cookies = NULL; + + kfree(pbm->msi_irq_table); + pbm->msi_irq_table = NULL; +} + +static int bringup_one_msi_queue(struct pci_pbm_info *pbm, + const struct sparc64_msiq_ops *ops, + unsigned long msiqid, + unsigned long devino) +{ + int irq = ops->msiq_build_irq(pbm, msiqid, devino); + int err, nid; + + if (irq < 0) + return irq; + + nid = pbm->numa_node; + if (nid != -1) { + cpumask_t numa_mask; + + cpumask_copy(&numa_mask, cpumask_of_node(nid)); + irq_set_affinity(irq, &numa_mask); + } + err = request_irq(irq, sparc64_msiq_interrupt, 0, + "MSIQ", + &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]); + if (err) + return err; + + return 0; +} + +static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm, + const struct sparc64_msiq_ops *ops) +{ + int i; + + for (i = 0; i < pbm->msiq_num; i++) { + unsigned long msiqid = i + pbm->msiq_first; + unsigned long devino = i + pbm->msiq_first_devino; + int err; + + err = bringup_one_msi_queue(pbm, ops, msiqid, devino); + if 
(err) + return err; + } + + return 0; +} + +void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, + const struct sparc64_msiq_ops *ops) +{ + const u32 *val; + int len; + + val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len); + if (!val || len != 4) + goto no_msi; + pbm->msiq_num = *val; + if (pbm->msiq_num) { + const struct msiq_prop { + u32 first_msiq; + u32 num_msiq; + u32 first_devino; + } *mqp; + const struct msi_range_prop { + u32 first_msi; + u32 num_msi; + } *mrng; + const struct addr_range_prop { + u32 msi32_high; + u32 msi32_low; + u32 msi32_len; + u32 msi64_high; + u32 msi64_low; + u32 msi64_len; + } *arng; + + val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len); + if (!val || len != 4) + goto no_msi; + + pbm->msiq_ent_count = *val; + + mqp = of_get_property(pbm->op->dev.of_node, + "msi-eq-to-devino", &len); + if (!mqp) + mqp = of_get_property(pbm->op->dev.of_node, + "msi-eq-devino", &len); + if (!mqp || len != sizeof(struct msiq_prop)) + goto no_msi; + + pbm->msiq_first = mqp->first_msiq; + pbm->msiq_first_devino = mqp->first_devino; + + val = of_get_property(pbm->op->dev.of_node, "#msi", &len); + if (!val || len != 4) + goto no_msi; + pbm->msi_num = *val; + + mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len); + if (!mrng || len != sizeof(struct msi_range_prop)) + goto no_msi; + pbm->msi_first = mrng->first_msi; + + val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len); + if (!val || len != 4) + goto no_msi; + pbm->msi_data_mask = *val; + + val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len); + if (!val || len != 4) + goto no_msi; + pbm->msix_data_width = *val; + + arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges", + &len); + if (!arng || len != sizeof(struct addr_range_prop)) + goto no_msi; + pbm->msi32_start = ((u64)arng->msi32_high << 32) | + (u64) arng->msi32_low; + pbm->msi64_start = ((u64)arng->msi64_high << 32) | + (u64) arng->msi64_low; + pbm->msi32_len = arng->msi32_len; + pbm->msi64_len = arng->msi64_len; + + if (msi_bitmap_alloc(pbm)) + goto no_msi; + + if (msi_table_alloc(pbm)) { + msi_bitmap_free(pbm); + goto no_msi; + } + + if (ops->msiq_alloc(pbm)) { + msi_table_free(pbm); + msi_bitmap_free(pbm); + goto no_msi; + } + + if (sparc64_bringup_msi_queues(pbm, ops)) { + ops->msiq_free(pbm); + msi_table_free(pbm); + msi_bitmap_free(pbm); + goto no_msi; + } + + printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] " + "devino[0x%x]\n", + pbm->name, + pbm->msiq_first, pbm->msiq_num, + pbm->msiq_ent_count, + pbm->msiq_first_devino); + printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] " + "width[%u]\n", + pbm->name, + pbm->msi_first, pbm->msi_num, pbm->msi_data_mask, + pbm->msix_data_width); + printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] " + "addr64[0x%llx:0x%x]\n", + pbm->name, + pbm->msi32_start, pbm->msi32_len, + pbm->msi64_start, pbm->msi64_len); + printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n", + pbm->name, + __pa(pbm->msi_queues)); + + pbm->msi_ops = ops; + pbm->setup_msi_irq = sparc64_setup_msi_irq; + pbm->teardown_msi_irq = sparc64_teardown_msi_irq; + } + return; + +no_msi: + pbm->msiq_num = 0; + printk(KERN_INFO "%s: No MSI support.\n", pbm->name); +} diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c new file mode 100644 index 000000000..f413371da --- /dev/null +++ b/arch/sparc/kernel/pci_psycho.c @@ -0,0 +1,618 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_psycho.c: PSYCHO/U2P specific PCI controller support. 
+ * + * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "pci_impl.h" +#include "iommu_common.h" +#include "psycho_common.h" + +#define DRIVER_NAME "psycho" +#define PFX DRIVER_NAME ": " + +/* Misc. PSYCHO PCI controller register offsets and definitions. */ +#define PSYCHO_CONTROL 0x0010UL +#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/ +#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */ +#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */ +#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */ +#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */ +#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */ +#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */ +#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */ +#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */ +#define PSYCHO_PCIA_CTRL 0x2000UL +#define PSYCHO_PCIB_CTRL 0x4000UL +#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */ +#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */ +#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */ +#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */ +#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */ +#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */ +#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */ +#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */ +#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */ +#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */ +#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */ +#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */ + +/* PSYCHO error handling support. */ + +/* Helper function of IOMMU error checking, which checks out + * the state of the streaming buffers. The IOMMU lock is + * held when this is called. + * + * For the PCI error case we know which PBM (and thus which + * streaming buffer) caused the error, but for the uncorrectable + * error case we do not. So we always check both streaming caches. 
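+ *
+ * The resulting pattern, as used by the UE handler below:
+ *
+ *	psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);
+ *	if (pbm->sibling)
+ *		psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR);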
+ */ +#define PSYCHO_STRBUF_CONTROL_A 0x2800UL +#define PSYCHO_STRBUF_CONTROL_B 0x4800UL +#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ +#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ +#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ +#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ +#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ +#define PSYCHO_STRBUF_FLUSH_A 0x2808UL +#define PSYCHO_STRBUF_FLUSH_B 0x4808UL +#define PSYCHO_STRBUF_FSYNC_A 0x2810UL +#define PSYCHO_STRBUF_FSYNC_B 0x4810UL +#define PSYCHO_STC_DATA_A 0xb000UL +#define PSYCHO_STC_DATA_B 0xc000UL +#define PSYCHO_STC_ERR_A 0xb400UL +#define PSYCHO_STC_ERR_B 0xc400UL +#define PSYCHO_STC_TAG_A 0xb800UL +#define PSYCHO_STC_TAG_B 0xc800UL +#define PSYCHO_STC_LINE_A 0xb900UL +#define PSYCHO_STC_LINE_B 0xc900UL + +/* When an Uncorrectable Error or a PCI Error happens, we + * interrogate the IOMMU state to see if it is the cause. + */ +#define PSYCHO_IOMMU_CONTROL 0x0200UL +#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ +#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ +#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ +#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ +#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ +#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ +#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ +#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ +#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ +#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ +#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ +#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ +#define PSYCHO_IOMMU_TSBBASE 0x0208UL +#define PSYCHO_IOMMU_FLUSH 0x0210UL +#define PSYCHO_IOMMU_TAG 0xa580UL +#define PSYCHO_IOMMU_DATA 0xa600UL + +/* Uncorrectable Errors. Cause of the error and the address are + * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors + * relating to UPA interface transactions. 
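+ *
+ * The handler decodes the latched AFSR with the field masks and
+ * shifts defined below, e.g.:
+ *
+ *	bytemask = (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL;
+ *	doff     = (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL;
+ *	mid      = (afsr & PSYCHO_UEAFSR_MID)  >> 24UL;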
+ */ +#define PSYCHO_UE_AFSR 0x0030UL +#define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */ +#define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */ +#define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */ +#define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ +#define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */ +#define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/ +#define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ +#define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ +#define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */ +#define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */ +#define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */ +#define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */ +#define PSYCHO_UE_AFAR 0x0038UL + +static irqreturn_t psycho_ue_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + PSYCHO_UE_AFSR; + unsigned long afar_reg = pbm->controller_regs + PSYCHO_UE_AFAR; + unsigned long afsr, afar, error_bits; + int reported; + + /* Latch uncorrectable error status. */ + afar = upa_readq(afar_reg); + afsr = upa_readq(afsr_reg); + + /* Clear the primary/secondary error status bits. */ + error_bits = afsr & + (PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR | + PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Uncorrectable Error, primary error type[%s]\n", + pbm->name, + (((error_bits & PSYCHO_UEAFSR_PPIO) ? + "PIO" : + ((error_bits & PSYCHO_UEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & PSYCHO_UEAFSR_PDWR) ? + "DMA Write" : "???"))))); + printk("%s: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n", + pbm->name, + (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL, + (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL, + (afsr & PSYCHO_UEAFSR_MID) >> 24UL, + ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0)); + printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: UE Secondary errors [", pbm->name); + reported = 0; + if (afsr & PSYCHO_UEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & PSYCHO_UEAFSR_SDRD) { + reported++; + printk("(DMA Read)"); + } + if (afsr & PSYCHO_UEAFSR_SDWR) { + reported++; + printk("(DMA Write)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + /* Interrogate both IOMMUs for error status. */ + psycho_check_iommu_error(pbm, afsr, afar, UE_ERR); + if (pbm->sibling) + psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR); + + return IRQ_HANDLED; +} + +/* Correctable Errors. 
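+ * The CE AFSR layout mirrors the UE one, with an additional ECC
+ * syndrome field which the handler below extracts as
+ * (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL.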
*/ +#define PSYCHO_CE_AFSR 0x0040UL +#define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */ +#define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */ +#define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */ +#define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ +#define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */ +#define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/ +#define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */ +#define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */ +#define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ +#define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Double Offset */ +#define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */ +#define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */ +#define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */ +#define PSYCHO_CE_AFAR 0x0048UL + +static irqreturn_t psycho_ce_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + PSYCHO_CE_AFSR; + unsigned long afar_reg = pbm->controller_regs + PSYCHO_CE_AFAR; + unsigned long afsr, afar, error_bits; + int reported; + + /* Latch error status. */ + afar = upa_readq(afar_reg); + afsr = upa_readq(afsr_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR | + PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Correctable Error, primary error type[%s]\n", + pbm->name, + (((error_bits & PSYCHO_CEAFSR_PPIO) ? + "PIO" : + ((error_bits & PSYCHO_CEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & PSYCHO_CEAFSR_PDWR) ? + "DMA Write" : "???"))))); + + /* XXX Use syndrome and afar to print out module string just like + * XXX UDB CE trap handler does... -DaveM + */ + printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] " + "UPA_MID[%02lx] was_block(%d)\n", + pbm->name, + (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL, + (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL, + (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL, + (afsr & PSYCHO_CEAFSR_MID) >> 24UL, + ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0)); + printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: CE Secondary errors [", pbm->name); + reported = 0; + if (afsr & PSYCHO_CEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & PSYCHO_CEAFSR_SDRD) { + reported++; + printk("(DMA Read)"); + } + if (afsr & PSYCHO_CEAFSR_SDWR) { + reported++; + printk("(DMA Write)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + return IRQ_HANDLED; +} + +/* PCI Errors. They are signalled by the PCI bus module since they + * are associated with a specific bus segment. + */ +#define PSYCHO_PCI_AFSR_A 0x2010UL +#define PSYCHO_PCI_AFSR_B 0x4010UL +#define PSYCHO_PCI_AFAR_A 0x2018UL +#define PSYCHO_PCI_AFAR_B 0x4018UL + +/* XXX What about PowerFail/PowerManagement???
-DaveM */ +#define PSYCHO_ECC_CTRL 0x0020 +#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ +#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ +#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */ +static void psycho_register_error_handlers(struct pci_pbm_info *pbm) +{ + struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); + unsigned long base = pbm->controller_regs; + u64 tmp; + int err; + + if (!op) + return; + + /* Psycho interrupt property order is: + * 0: PCIERR INO for this PBM + * 1: UE ERR + * 2: CE ERR + * 3: POWER FAIL + * 4: SPARE HARDWARE + * 5: POWER MANAGEMENT + */ + + if (op->archdata.num_irqs < 6) + return; + + /* We really mean to ignore the return result here. Two + * PCI controllers share the same interrupt numbers and + * drive the same front-end hardware. + */ + err = request_irq(op->archdata.irqs[1], psycho_ue_intr, IRQF_SHARED, + "PSYCHO_UE", pbm); + err = request_irq(op->archdata.irqs[2], psycho_ce_intr, IRQF_SHARED, + "PSYCHO_CE", pbm); + + /* This one, however, ought not to fail. We can just warn + * about it since the system can still operate properly even + * if this fails. + */ + err = request_irq(op->archdata.irqs[0], psycho_pcierr_intr, IRQF_SHARED, + "PSYCHO_PCIERR", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register PCIERR, " + "err=%d\n", pbm->name, err); + + /* Enable UE and CE interrupts for controller. */ + upa_writeq((PSYCHO_ECCCTRL_EE | + PSYCHO_ECCCTRL_UE | + PSYCHO_ECCCTRL_CE), base + PSYCHO_ECC_CTRL); + + /* Enable PCI Error interrupts and clear error + * bits for each PBM. + */ + tmp = upa_readq(base + PSYCHO_PCIA_CTRL); + tmp |= (PSYCHO_PCICTRL_SERR | + PSYCHO_PCICTRL_SBH_ERR | + PSYCHO_PCICTRL_EEN); + tmp &= ~(PSYCHO_PCICTRL_SBH_INT); + upa_writeq(tmp, base + PSYCHO_PCIA_CTRL); + + tmp = upa_readq(base + PSYCHO_PCIB_CTRL); + tmp |= (PSYCHO_PCICTRL_SERR | + PSYCHO_PCICTRL_SBH_ERR | + PSYCHO_PCICTRL_EEN); + tmp &= ~(PSYCHO_PCICTRL_SBH_INT); + upa_writeq(tmp, base + PSYCHO_PCIB_CTRL); +} + +/* PSYCHO boot time probing and initialization. */ +static void pbm_config_busmastering(struct pci_pbm_info *pbm) +{ + u8 *addr; + + /* Set cache-line size to 64 bytes, this is actually + * a nop but I do it for completeness. + */ + addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, + 0, PCI_CACHE_LINE_SIZE); + pci_config_write8(addr, 64 / sizeof(u32)); + + /* Set PBM latency timer to 64 PCI clocks. */ + addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, + 0, PCI_LATENCY_TIMER); + pci_config_write8(addr, 64); +} + +static void psycho_scan_bus(struct pci_pbm_info *pbm, + struct device *parent) +{ + pbm_config_busmastering(pbm); + pbm->is_66mhz_capable = 0; + pbm->pci_bus = pci_scan_one_pbm(pbm, parent); + + /* After the PCI bus scan is complete, we can register + * the error interrupt handlers.
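+ * (They were requested with IRQF_SHARED above precisely because
+ * the sibling PBM registers for the same interrupt lines.)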
+ */ + psycho_register_error_handlers(pbm); +} + +#define PSYCHO_IRQ_RETRY 0x1a00UL +#define PSYCHO_PCIA_DIAG 0x2020UL +#define PSYCHO_PCIB_DIAG 0x4020UL +#define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */ +#define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */ +#define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */ +#define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */ +#define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */ +#define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */ +#define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */ +#define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */ + +static void psycho_controller_hwinit(struct pci_pbm_info *pbm) +{ + u64 tmp; + + upa_writeq(5, pbm->controller_regs + PSYCHO_IRQ_RETRY); + + /* Enable arbiter for all PCI slots. */ + tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_CTRL); + tmp |= PSYCHO_PCICTRL_AEN; + upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_CTRL); + + tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_CTRL); + tmp |= PSYCHO_PCICTRL_AEN; + upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_CTRL); + + /* Disable DMA write / PIO read synchronization on + * both PCI bus segments. + * [ U2P Erratum 1243770, STP2223BGA data sheet ] + */ + tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_DIAG); + tmp |= PSYCHO_PCIDIAG_DDWSYNC; + upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_DIAG); + + tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_DIAG); + tmp |= PSYCHO_PCIDIAG_DDWSYNC; + upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_DIAG); +} + +static void psycho_pbm_strbuf_init(struct pci_pbm_info *pbm, + int is_pbm_a) +{ + unsigned long base = pbm->controller_regs; + u64 control; + + if (is_pbm_a) { + pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A; + pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A; + pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A; + pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_A; + pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_A; + pbm->stc.strbuf_line_diag = base + PSYCHO_STC_LINE_A; + } else { + pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B; + pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B; + pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B; + pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_B; + pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_B; + pbm->stc.strbuf_line_diag = base + PSYCHO_STC_LINE_B; + } + /* PSYCHO's streaming buffer lacks ctx flushing. */ + pbm->stc.strbuf_ctxflush = 0; + pbm->stc.strbuf_ctxmatch_base = 0; + + pbm->stc.strbuf_flushflag = (volatile unsigned long *) + ((((unsigned long)&pbm->stc.__flushflag_buf[0]) + + 63UL) + & ~63UL); + pbm->stc.strbuf_flushflag_pa = (unsigned long) + __pa(pbm->stc.strbuf_flushflag); + + /* Enable the streaming buffer. We have to be careful + * just in case OBP left it with LRU locking enabled. + * + * It is possible to control whether the PBM will be rerun on + * line misses. Currently I just retain whatever setting + * OBP left us with. All checks so far show it having + * a value of zero.
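+ *
+ * To force a policy, one of the two macros tested below would
+ * have to be defined here instead of #undef'd, e.g.:
+ *
+ *	#define PSYCHO_STRBUF_RERUN_DISABLE
+ *
+ * which makes the code below set PSYCHO_STRBUF_CTRL_RRDIS.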
+ */ +#undef PSYCHO_STRBUF_RERUN_ENABLE +#undef PSYCHO_STRBUF_RERUN_DISABLE + control = upa_readq(pbm->stc.strbuf_control); + control |= PSYCHO_STRBUF_CTRL_ENAB; + control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR); +#ifdef PSYCHO_STRBUF_RERUN_ENABLE + control &= ~(PSYCHO_STRBUF_CTRL_RRDIS); +#else +#ifdef PSYCHO_STRBUF_RERUN_DISABLE + control |= PSYCHO_STRBUF_CTRL_RRDIS; +#endif +#endif + upa_writeq(control, pbm->stc.strbuf_control); + + pbm->stc.strbuf_enabled = 1; +} + +#define PSYCHO_IOSPACE_A 0x002000000UL +#define PSYCHO_IOSPACE_B 0x002010000UL +#define PSYCHO_IOSPACE_SIZE 0x00000ffffUL +#define PSYCHO_MEMSPACE_A 0x100000000UL +#define PSYCHO_MEMSPACE_B 0x180000000UL +#define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL + +static void psycho_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, int is_pbm_a) +{ + psycho_pbm_init_common(pbm, op, "PSYCHO", PBM_CHIP_TYPE_PSYCHO); + psycho_pbm_strbuf_init(pbm, is_pbm_a); + psycho_scan_bus(pbm, &op->dev); +} + +static struct pci_pbm_info *psycho_find_sibling(u32 upa_portid) +{ + struct pci_pbm_info *pbm; + + for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { + if (pbm->portid == upa_portid) + return pbm; + } + return NULL; +} + +#define PSYCHO_CONFIGSPACE 0x001000000UL + +static int psycho_probe(struct platform_device *op) +{ + const struct linux_prom64_registers *pr_regs; + struct device_node *dp = op->dev.of_node; + struct pci_pbm_info *pbm; + struct iommu *iommu; + int is_pbm_a, err; + u32 upa_portid; + + upa_portid = of_getintprop_default(dp, "upa-portid", 0xff); + + err = -ENOMEM; + pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); + if (!pbm) { + printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); + goto out_err; + } + + pbm->sibling = psycho_find_sibling(upa_portid); + if (pbm->sibling) { + iommu = pbm->sibling->iommu; + } else { + iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); + if (!iommu) { + printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n"); + goto out_free_controller; + } + } + + pbm->iommu = iommu; + pbm->portid = upa_portid; + + pr_regs = of_get_property(dp, "reg", NULL); + err = -ENODEV; + if (!pr_regs) { + printk(KERN_ERR PFX "No reg property.\n"); + goto out_free_iommu; + } + + is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000); + + pbm->controller_regs = pr_regs[2].phys_addr; + pbm->config_space = (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE); + + if (is_pbm_a) { + pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_A; + pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_A; + pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIA_CTRL; + } else { + pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_B; + pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_B; + pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIB_CTRL; + } + + psycho_controller_hwinit(pbm); + if (!pbm->sibling) { + err = psycho_iommu_init(pbm, 128, 0xc0000000, + 0xffffffff, PSYCHO_CONTROL); + if (err) + goto out_free_iommu; + + /* If necessary, hook us up for starfire IRQ translations. 
*/ + if (this_is_starfire) + starfire_hookup(pbm->portid); + } + + psycho_pbm_init(pbm, op, is_pbm_a); + + pbm->next = pci_pbm_root; + pci_pbm_root = pbm; + + if (pbm->sibling) + pbm->sibling->sibling = pbm; + + dev_set_drvdata(&op->dev, pbm); + + return 0; + +out_free_iommu: + if (!pbm->sibling) + kfree(pbm->iommu); + +out_free_controller: + kfree(pbm); + +out_err: + return err; +} + +static const struct of_device_id psycho_match[] = { + { + .name = "pci", + .compatible = "pci108e,8000", + }, + {}, +}; + +static struct platform_driver psycho_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = psycho_match, + }, + .probe = psycho_probe, +}; + +static int __init psycho_init(void) +{ + return platform_driver_register(&psycho_driver); +} + +subsys_initcall(psycho_init); diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c new file mode 100644 index 000000000..3c38ca40a --- /dev/null +++ b/arch/sparc/kernel/pci_sabre.c @@ -0,0 +1,614 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_sabre.c: Sabre specific PCI controller support. + * + * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "pci_impl.h" +#include "iommu_common.h" +#include "psycho_common.h" + +#define DRIVER_NAME "sabre" +#define PFX DRIVER_NAME ": " + +/* SABRE PCI controller register offsets and definitions. */ +#define SABRE_UE_AFSR 0x0030UL +#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ +#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ +#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ +#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ +#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */ +#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */ +#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ +#define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */ +#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */ +#define SABRE_UECE_AFAR 0x0038UL +#define SABRE_CE_AFSR 0x0040UL +#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ +#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ +#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ +#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ +#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */ +#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ +#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */ +#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */ +#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */ +#define SABRE_IOMMU_CONTROL 0x0200UL +#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */ +#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */ +#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */ +#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */ +#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ +#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000 +#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000 
+#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000 +#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000 +#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000 +#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000 +#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000 +#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000 +#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */ +#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ +#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ +#define SABRE_IOMMU_TSBBASE 0x0208UL +#define SABRE_IOMMU_FLUSH 0x0210UL +#define SABRE_IMAP_A_SLOT0 0x0c00UL +#define SABRE_IMAP_B_SLOT0 0x0c20UL +#define SABRE_IMAP_SCSI 0x1000UL +#define SABRE_IMAP_ETH 0x1008UL +#define SABRE_IMAP_BPP 0x1010UL +#define SABRE_IMAP_AU_REC 0x1018UL +#define SABRE_IMAP_AU_PLAY 0x1020UL +#define SABRE_IMAP_PFAIL 0x1028UL +#define SABRE_IMAP_KMS 0x1030UL +#define SABRE_IMAP_FLPY 0x1038UL +#define SABRE_IMAP_SHW 0x1040UL +#define SABRE_IMAP_KBD 0x1048UL +#define SABRE_IMAP_MS 0x1050UL +#define SABRE_IMAP_SER 0x1058UL +#define SABRE_IMAP_UE 0x1070UL +#define SABRE_IMAP_CE 0x1078UL +#define SABRE_IMAP_PCIERR 0x1080UL +#define SABRE_IMAP_GFX 0x1098UL +#define SABRE_IMAP_EUPA 0x10a0UL +#define SABRE_ICLR_A_SLOT0 0x1400UL +#define SABRE_ICLR_B_SLOT0 0x1480UL +#define SABRE_ICLR_SCSI 0x1800UL +#define SABRE_ICLR_ETH 0x1808UL +#define SABRE_ICLR_BPP 0x1810UL +#define SABRE_ICLR_AU_REC 0x1818UL +#define SABRE_ICLR_AU_PLAY 0x1820UL +#define SABRE_ICLR_PFAIL 0x1828UL +#define SABRE_ICLR_KMS 0x1830UL +#define SABRE_ICLR_FLPY 0x1838UL +#define SABRE_ICLR_SHW 0x1840UL +#define SABRE_ICLR_KBD 0x1848UL +#define SABRE_ICLR_MS 0x1850UL +#define SABRE_ICLR_SER 0x1858UL +#define SABRE_ICLR_UE 0x1870UL +#define SABRE_ICLR_CE 0x1878UL +#define SABRE_ICLR_PCIERR 0x1880UL +#define SABRE_WRSYNC 0x1c20UL +#define SABRE_PCICTRL 0x2000UL +#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */ +#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */ +#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */ +#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */ +#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */ +#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */ +#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */ +#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */ +#define SABRE_PIOAFSR 0x2010UL +#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */ +#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */ +#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */ +#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */ +#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */ +#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */ +#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */ +#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */ +#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */ +#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */ +#define SABRE_PIOAFAR 0x2018UL +#define SABRE_PCIDIAG 0x2020UL +#define SABRE_PCIDIAG_DRTRY 
0x0000000000000040UL /* Disable PIO Retry Limit */ +#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */ +#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */ +#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */ +#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */ +#define SABRE_PCITASR 0x2028UL +#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */ +#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */ +#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */ +#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */ +#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */ +#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */ +#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */ +#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */ +#define SABRE_PIOBUF_DIAG 0x5000UL +#define SABRE_DMABUF_DIAGLO 0x5100UL +#define SABRE_DMABUF_DIAGHI 0x51c0UL +#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */ +#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */ +#define SABRE_IOMMU_VADIAG 0xa400UL +#define SABRE_IOMMU_TCDIAG 0xa408UL +#define SABRE_IOMMU_TAG 0xa580UL +#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */ +#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */ +#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */ +#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */ +#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */ +#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */ +#define SABRE_IOMMU_DATA 0xa600UL +#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */ +#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */ +#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */ +#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */ +#define SABRE_PCI_IRQSTATE 0xa800UL +#define SABRE_OBIO_IRQSTATE 0xa808UL +#define SABRE_FFBCFG 0xf000UL +#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */ +#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */ +#define SABRE_MCCTRL0 0xf010UL +#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */ +#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */ +#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */ +#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */ +#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */ +#define SABRE_MCCTRL1 0xf018UL +#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */ +#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */ +#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */ +#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */ +#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */ +#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */ +#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */ +#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */ 
+#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */ +#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */ +#define SABRE_RESETCTRL 0xf020UL + +#define SABRE_CONFIGSPACE 0x001000000UL +#define SABRE_IOSPACE 0x002000000UL +#define SABRE_IOSPACE_SIZE 0x000ffffffUL +#define SABRE_MEMSPACE 0x100000000UL +#define SABRE_MEMSPACE_SIZE 0x07fffffffUL + +static int hummingbird_p; +static struct pci_bus *sabre_root_bus; + +static irqreturn_t sabre_ue_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + SABRE_UE_AFSR; + unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR; + unsigned long afsr, afar, error_bits; + int reported; + + /* Latch uncorrectable error status. */ + afar = upa_readq(afar_reg); + afsr = upa_readq(afsr_reg); + + /* Clear the primary/secondary error status bits. */ + error_bits = afsr & + (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR | + SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR | + SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Uncorrectable Error, primary error type[%s%s]\n", + pbm->name, + ((error_bits & SABRE_UEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & SABRE_UEAFSR_PDWR) ? + "DMA Write" : "???")), + ((error_bits & SABRE_UEAFSR_PDTE) ? + ":Translation Error" : "")); + printk("%s: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n", + pbm->name, + (afsr & SABRE_UEAFSR_BMSK) >> 32UL, + (afsr & SABRE_UEAFSR_OFF) >> 29UL, + ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0)); + printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: UE Secondary errors [", pbm->name); + reported = 0; + if (afsr & SABRE_UEAFSR_SDRD) { + reported++; + printk("(DMA Read)"); + } + if (afsr & SABRE_UEAFSR_SDWR) { + reported++; + printk("(DMA Write)"); + } + if (afsr & SABRE_UEAFSR_SDTE) { + reported++; + printk("(Translation Error)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + /* Interrogate IOMMU for error status. */ + psycho_check_iommu_error(pbm, afsr, afar, UE_ERR); + + return IRQ_HANDLED; +} + +static irqreturn_t sabre_ce_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + SABRE_CE_AFSR; + unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR; + unsigned long afsr, afar, error_bits; + int reported; + + /* Latch error status. */ + afar = upa_readq(afar_reg); + afsr = upa_readq(afsr_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR | + SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Correctable Error, primary error type[%s]\n", + pbm->name, + ((error_bits & SABRE_CEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & SABRE_CEAFSR_PDWR) ? + "DMA Write" : "???"))); + + /* XXX Use syndrome and afar to print out module string just like + * XXX UDB CE trap handler does... -DaveM + */ + printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] " + "was_block(%d)\n", + pbm->name, + (afsr & SABRE_CEAFSR_ESYND) >> 48UL, + (afsr & SABRE_CEAFSR_BMSK) >> 32UL, + (afsr & SABRE_CEAFSR_OFF) >> 29UL, + ((afsr & SABRE_CEAFSR_BLK) ? 
1 : 0)); + printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: CE Secondary errors [", pbm->name); + reported = 0; + if (afsr & SABRE_CEAFSR_SDRD) { + reported++; + printk("(DMA Read)"); + } + if (afsr & SABRE_CEAFSR_SDWR) { + reported++; + printk("(DMA Write)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + return IRQ_HANDLED; +} + +static void sabre_register_error_handlers(struct pci_pbm_info *pbm) +{ + struct device_node *dp = pbm->op->dev.of_node; + struct platform_device *op; + unsigned long base = pbm->controller_regs; + u64 tmp; + int err; + + if (pbm->chip_type == PBM_CHIP_TYPE_SABRE) + dp = dp->parent; + + op = of_find_device_by_node(dp); + if (!op) + return; + + /* Sabre/Hummingbird IRQ property layout is: + * 0: PCI ERR + * 1: UE ERR + * 2: CE ERR + * 3: POWER FAIL + */ + if (op->archdata.num_irqs < 4) + return; + + /* We clear the error bits in the appropriate AFSR before + * registering the handler so that we don't get spurious + * interrupts. + */ + upa_writeq((SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR | + SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR | + SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE), + base + SABRE_UE_AFSR); + + err = request_irq(op->archdata.irqs[1], sabre_ue_intr, 0, "SABRE_UE", pbm); + if (err) + printk(KERN_WARNING "%s: Couldn't register UE, err=%d.\n", + pbm->name, err); + + upa_writeq((SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR | + SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR), + base + SABRE_CE_AFSR); + + + err = request_irq(op->archdata.irqs[2], sabre_ce_intr, 0, "SABRE_CE", pbm); + if (err) + printk(KERN_WARNING "%s: Couldn't register CE, err=%d.\n", + pbm->name, err); + err = request_irq(op->archdata.irqs[0], psycho_pcierr_intr, 0, + "SABRE_PCIERR", pbm); + if (err) + printk(KERN_WARNING "%s: Couldn't register PCIERR, err=%d.\n", + pbm->name, err); + + tmp = upa_readq(base + SABRE_PCICTRL); + tmp |= SABRE_PCICTRL_ERREN; + upa_writeq(tmp, base + SABRE_PCICTRL); +} + +static void apb_init(struct pci_bus *sabre_bus) +{ + struct pci_dev *pdev; + + list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { + if (pdev->vendor == PCI_VENDOR_ID_SUN && + pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { + u16 word16; + + pci_read_config_word(pdev, PCI_COMMAND, &word16); + word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | + PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | + PCI_COMMAND_IO; + pci_write_config_word(pdev, PCI_COMMAND, word16); + + /* Status register bits are "write 1 to clear". */ + pci_write_config_word(pdev, PCI_STATUS, 0xffff); + pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff); + + /* Use a primary/secondary latency timer value + * of 64. + */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); + pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64); + + /* Enable reporting/forwarding of master aborts, + * parity, and SERR. + */ + pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL, + (PCI_BRIDGE_CTL_PARITY | + PCI_BRIDGE_CTL_SERR | + PCI_BRIDGE_CTL_MASTER_ABORT)); + } + } +} + +static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent) +{ + static int once; + + /* The APB bridge speaks to the Sabre host PCI bridge + * at 66MHz, but the front side of APB runs at 33MHz + * for both segments. + * + * Hummingbird systems do not use APB, so they run + * at 66MHz. + */ + if (hummingbird_p) + pbm->is_66mhz_capable = 1; + else + pbm->is_66mhz_capable = 0; + + /* This driver has not been verified to handle + * multiple SABREs yet, so trap this. + * + * Also note that the SABRE host bridge is hardwired + * to live at bus 0.
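+ * The static 'once' flag below enforces this: a second probe
+ * only logs an error and leaves the new PBM unscanned.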
+ */ + if (once != 0) { + printk(KERN_ERR PFX "Multiple controllers unsupported.\n"); + return; + } + once++; + + pbm->pci_bus = pci_scan_one_pbm(pbm, parent); + if (!pbm->pci_bus) + return; + + sabre_root_bus = pbm->pci_bus; + + apb_init(pbm->pci_bus); + + sabre_register_error_handlers(pbm); +} + +static void sabre_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op) +{ + psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE); + pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR; + pbm->pci_afar = pbm->controller_regs + SABRE_PIOAFAR; + pbm->pci_csr = pbm->controller_regs + SABRE_PCICTRL; + sabre_scan_bus(pbm, &op->dev); +} + +static const struct of_device_id sabre_match[]; +static int sabre_probe(struct platform_device *op) +{ + const struct of_device_id *match; + const struct linux_prom64_registers *pr_regs; + struct device_node *dp = op->dev.of_node; + struct pci_pbm_info *pbm; + u32 upa_portid, dma_mask; + struct iommu *iommu; + int tsbsize, err; + const u32 *vdma; + u64 clear_irq; + + match = of_match_device(sabre_match, &op->dev); + hummingbird_p = match && (match->data != NULL); + if (!hummingbird_p) { + struct device_node *cpu_dp; + + /* Of course, Sun has to encode things a thousand + * different ways, inconsistently. + */ + for_each_node_by_type(cpu_dp, "cpu") { + if (of_node_name_eq(cpu_dp, "SUNW,UltraSPARC-IIe")) + hummingbird_p = 1; + } + } + + err = -ENOMEM; + pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); + if (!pbm) { + printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); + goto out_err; + } + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + if (!iommu) { + printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n"); + goto out_free_controller; + } + + pbm->iommu = iommu; + + upa_portid = of_getintprop_default(dp, "upa-portid", 0xff); + + pbm->portid = upa_portid; + + /* + * Map in SABRE register set and report the presence of this SABRE. + */ + + pr_regs = of_get_property(dp, "reg", NULL); + err = -ENODEV; + if (!pr_regs) { + printk(KERN_ERR PFX "No reg property\n"); + goto out_free_iommu; + } + + /* + * First REG in property is base of entire SABRE register space. + */ + pbm->controller_regs = pr_regs[0].phys_addr; + + /* Clear interrupts */ + + /* PCI first */ + for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8) + upa_writeq(0x0UL, pbm->controller_regs + clear_irq); + + /* Then OBIO */ + for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8) + upa_writeq(0x0UL, pbm->controller_regs + clear_irq); + + /* Error interrupts are enabled later after the bus scan. */ + upa_writeq((SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR | + SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN), + pbm->controller_regs + SABRE_PCICTRL); + + /* Now map in PCI config space for entire SABRE. */ + pbm->config_space = pbm->controller_regs + SABRE_CONFIGSPACE; + + vdma = of_get_property(dp, "virtual-dma", NULL); + if (!vdma) { + printk(KERN_ERR PFX "No virtual-dma property\n"); + goto out_free_iommu; + } + + dma_mask = vdma[0]; + switch(vdma[1]) { + case 0x20000000: + dma_mask |= 0x1fffffff; + tsbsize = 64; + break; + case 0x40000000: + dma_mask |= 0x3fffffff; + tsbsize = 128; + break; + + case 0x80000000: + dma_mask |= 0x7fffffff; + tsbsize = 128; + break; + default: + printk(KERN_ERR PFX "Strange virtual-dma size.\n"); + goto out_free_iommu; + } + + err = psycho_iommu_init(pbm, tsbsize, vdma[0], dma_mask, SABRE_WRSYNC); + if (err) + goto out_free_iommu; + + /* + * Look for APB underneath. 
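+ * sabre_pbm_init() ends up in sabre_scan_bus(), and apb_init()
+ * then walks bus 0 programming each SUN/SIMBA bridge it finds
+ * (COMMAND bits, latency timers, bridge control).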
+ */ + sabre_pbm_init(pbm, op); + + pbm->next = pci_pbm_root; + pci_pbm_root = pbm; + + dev_set_drvdata(&op->dev, pbm); + + return 0; + +out_free_iommu: + kfree(pbm->iommu); + +out_free_controller: + kfree(pbm); + +out_err: + return err; +} + +static const struct of_device_id sabre_match[] = { + { + .name = "pci", + .compatible = "pci108e,a001", + .data = (void *) 1, + }, + { + .name = "pci", + .compatible = "pci108e,a000", + }, + {}, +}; + +static struct platform_driver sabre_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = sabre_match, + }, + .probe = sabre_probe, +}; + +static int __init sabre_init(void) +{ + return platform_driver_register(&sabre_driver); +} + +subsys_initcall(sabre_init); diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c new file mode 100644 index 000000000..421aba00e --- /dev/null +++ b/arch/sparc/kernel/pci_schizo.c @@ -0,0 +1,1510 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. + * + * Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "pci_impl.h" +#include "iommu_common.h" + +#define DRIVER_NAME "schizo" +#define PFX DRIVER_NAME ": " + +/* This is a convention that at least Excalibur and Merlin + * follow. I suppose the SCHIZO used in Starcat and friends + * will do similar. + * + * The only way I could see this changing is if the newlink + * block requires more space in Schizo's address space than + * they predicted, thus requiring an address space reorg when + * the newer Schizo is taped out. + */ + +/* Streaming buffer control register. */ +#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ +#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ +#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ +#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ +#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ + +/* IOMMU control register. 
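+ * The TSBSZ field selects the number of 8-byte TSB entries in
+ * powers of two, so e.g. SCHIZO_IOMMU_TSBSZ_128K below means a
+ * 128k-entry table occupying 128k * 8 bytes = 1MB of memory.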
*/ +#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ +#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ +#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ +#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ +#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ +#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ +#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ +#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ +#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ +#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ +#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ +#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ + +/* Schizo config space address format is nearly identical to + * that of PSYCHO: + * + * 32 24 23 16 15 11 10 8 7 2 1 0 + * --------------------------------------------------------- + * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 | + * --------------------------------------------------------- + */ +#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space) +#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \ + (((unsigned long)(BUS) << 16) | \ + ((unsigned long)(DEVFN) << 8) | \ + ((unsigned long)(REG))) + +static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm, + unsigned char bus, + unsigned int devfn, + int where) +{ + if (!pbm) + return NULL; + bus -= pbm->pci_first_busno; + return (void *) + (SCHIZO_CONFIG_BASE(pbm) | + SCHIZO_CONFIG_ENCODE(bus, devfn, where)); +} + +/* SCHIZO error handling support. 
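+ * All of the handlers funnel IOMMU interrogation through
+ * schizo_check_iommu_error(), which checks each PBM in turn and,
+ * if its streaming buffer is enabled, dumps the STC diagnostic
+ * state under stc_buf_lock via __schizo_check_stc_error_pbm().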
*/ +enum schizo_error_type { + UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR +}; + +static DEFINE_SPINLOCK(stc_buf_lock); +static unsigned long stc_error_buf[128]; +static unsigned long stc_tag_buf[16]; +static unsigned long stc_line_buf[16]; + +#define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */ +#define SCHIZO_CE_INO 0x31 /* Correctable ECC error */ +#define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */ +#define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */ +#define SCHIZO_SERR_INO 0x34 /* Safari interface error */ + +#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ +#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ +#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ + +#define SCHIZO_STCERR_WRITE 0x2UL +#define SCHIZO_STCERR_READ 0x1UL + +#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL +#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL +#define SCHIZO_STCTAG_VALID 0x8000000000000000UL +#define SCHIZO_STCTAG_READ 0x4000000000000000UL + +#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL +#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL +#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL +#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL +#define SCHIZO_STCLINE_VALID 0x0000000000600000UL +#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL + +static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, + enum schizo_error_type type) +{ + struct strbuf *strbuf = &pbm->stc; + unsigned long regbase = pbm->pbm_regs; + unsigned long err_base, tag_base, line_base; + u64 control; + int i; + + err_base = regbase + SCHIZO_STC_ERR; + tag_base = regbase + SCHIZO_STC_TAG; + line_base = regbase + SCHIZO_STC_LINE; + + spin_lock(&stc_buf_lock); + + /* This is __REALLY__ dangerous. When we put the + * streaming buffer into diagnostic mode to probe + * its tags and error status, we _must_ clear all + * of the line tag valid bits before re-enabling + * the streaming buffer. If any dirty data lives + * in the STC when we do this, we will end up + * invalidating it before it has a chance to reach + * main memory. + */ + control = upa_readq(strbuf->strbuf_control); + upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB), + strbuf->strbuf_control); + for (i = 0; i < 128; i++) { + unsigned long val; + + val = upa_readq(err_base + (i * 8UL)); + upa_writeq(0UL, err_base + (i * 8UL)); + stc_error_buf[i] = val; + } + for (i = 0; i < 16; i++) { + stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL)); + stc_line_buf[i] = upa_readq(line_base + (i * 8UL)); + upa_writeq(0UL, tag_base + (i * 8UL)); + upa_writeq(0UL, line_base + (i * 8UL)); + } + + /* OK, state is logged, exit diagnostic mode. */ + upa_writeq(control, strbuf->strbuf_control); + + for (i = 0; i < 16; i++) { + int j, saw_error, first, last; + + saw_error = 0; + first = i * 8; + last = first + 8; + for (j = first; j < last; j++) { + unsigned long errval = stc_error_buf[j]; + if (errval != 0) { + saw_error++; + printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n", + pbm->name, + j, + (errval & SCHIZO_STCERR_WRITE) ? 1 : 0, + (errval & SCHIZO_STCERR_READ) ? 1 : 0); + } + } + if (saw_error != 0) { + unsigned long tagval = stc_tag_buf[i]; + unsigned long lineval = stc_line_buf[i]; + printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n", + pbm->name, + i, + ((tagval & SCHIZO_STCTAG_PPN) >> 19UL), + (tagval & SCHIZO_STCTAG_VPN), + ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0), + ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0)); + + /* XXX Should spit out per-bank error information...
-DaveM */
+ printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+ "V(%d)FOFN(%d)]\n",
+ pbm->name,
+ i,
+ ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
+ ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
+ ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
+ ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
+ ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
+ ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
+ }
+ }
+
+ spin_unlock(&stc_buf_lock);
+}
+
+/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
+ * controller-level errors.
+ */
+
+#define SCHIZO_IOMMU_TAG 0xa580UL
+#define SCHIZO_IOMMU_DATA 0xa600UL
+
+#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
+#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
+#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
+#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
+#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
+#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
+#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
+
+#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
+#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
+#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
+
+static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
+ enum schizo_error_type type)
+{
+ struct iommu *iommu = pbm->iommu;
+ unsigned long iommu_tag[16];
+ unsigned long iommu_data[16];
+ unsigned long flags;
+ u64 control;
+ int i;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ control = upa_readq(iommu->iommu_control);
+ if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
+ unsigned long base;
+ char *type_string;
+
+ /* Clear the error encountered bit. */
+ control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
+ upa_writeq(control, iommu->iommu_control);
+
+ switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+ case 0:
+ type_string = "Protection Error";
+ break;
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 2:
+ type_string = "TimeOut Error";
+ break;
+ case 3:
+ default:
+ type_string = "ECC Error";
+ break;
+ }
+ printk("%s: IOMMU Error, type[%s]\n",
+ pbm->name, type_string);
+
+ /* Put the IOMMU into diagnostic mode and probe
+ * its TLB for entries with error status.
+ *
+ * It is very possible for another DVMA to occur
+ * while we do this probe, and corrupt the system
+ * further. But we are so screwed at this point
+ * that we are likely to crash hard anyways, so
+ * get as much diagnostic information to the
+ * console as we can.
+ */
+ upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB,
+ iommu->iommu_control);
+
+ base = pbm->pbm_regs;
+
+ for (i = 0; i < 16; i++) {
+ iommu_tag[i] =
+ upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL));
+ iommu_data[i] =
+ upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL));
+
+ /* Now clear out the entry. */
+ upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL));
+ upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL));
+ }
+
+ /* Leave diagnostic mode. */
+ upa_writeq(control, iommu->iommu_control);
+
+ for (i = 0; i < 16; i++) {
+ unsigned long tag, data;
+
+ tag = iommu_tag[i];
+ if (!(tag & SCHIZO_IOMMU_TAG_ERR))
+ continue;
+
+ data = iommu_data[i];
+ switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
+ case 0:
+ type_string = "Protection Error";
+ break;
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 2:
+ type_string = "TimeOut Error";
+ break;
+ case 3:
+ default:
+ type_string = "ECC Error";
+ break;
+ }
+ printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
+ "sz(%dK) vpg(%08lx)]\n",
+ pbm->name, i, type_string,
+ (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
+ ((tag & SCHIZO_IOMMU_TAG_WRITE) ?
1 : 0), + ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0), + ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8), + (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT); + printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n", + pbm->name, i, + ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0), + ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0), + (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT); + } + } + if (pbm->stc.strbuf_enabled) + __schizo_check_stc_error_pbm(pbm, type); + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static void schizo_check_iommu_error(struct pci_pbm_info *pbm, + enum schizo_error_type type) +{ + schizo_check_iommu_error_pbm(pbm, type); + if (pbm->sibling) + schizo_check_iommu_error_pbm(pbm->sibling, type); +} + +/* Uncorrectable ECC error status gathering. */ +#define SCHIZO_UE_AFSR 0x10030UL +#define SCHIZO_UE_AFAR 0x10038UL + +#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */ +#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */ +#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */ +#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */ +#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */ +#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */ +#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */ +#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */ +#define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */ +#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */ +#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */ +#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */ +#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */ +#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */ + +static irqreturn_t schizo_ue_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR; + unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR; + unsigned long afsr, afar, error_bits; + int reported, limit; + + /* Latch uncorrectable error status. */ + afar = upa_readq(afar_reg); + + /* If either of the error pending bits are set in the + * AFSR, the error status is being actively updated by + * the hardware and we must re-read to get a clean value. + */ + limit = 1000; + do { + afsr = upa_readq(afsr_reg); + } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); + + /* Clear the primary/secondary error status bits. */ + error_bits = afsr & + (SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR | + SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Uncorrectable Error, primary error type[%s]\n", + pbm->name, + (((error_bits & SCHIZO_UEAFSR_PPIO) ? + "PIO" : + ((error_bits & SCHIZO_UEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & SCHIZO_UEAFSR_PDWR) ? + "DMA Write" : "???"))))); + printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", + pbm->name, + (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, + (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, + (afsr & SCHIZO_UEAFSR_AID) >> 24UL); + printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", + pbm->name, + (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, + (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 
1 : 0, + (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, + (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, + (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); + printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: UE Secondary errors [", pbm->name); + reported = 0; + if (afsr & SCHIZO_UEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & SCHIZO_UEAFSR_SDMA) { + reported++; + printk("(DMA)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + /* Interrogate IOMMU for error status. */ + schizo_check_iommu_error(pbm, UE_ERR); + + return IRQ_HANDLED; +} + +#define SCHIZO_CE_AFSR 0x10040UL +#define SCHIZO_CE_AFAR 0x10048UL + +#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL +#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL +#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL +#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL +#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL +#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL +#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL +#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL +#define SCHIZO_CEAFSR_AID 0x000000001f000000UL +#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL +#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL +#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL +#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL +#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL + +static irqreturn_t schizo_ce_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR; + unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR; + unsigned long afsr, afar, error_bits; + int reported, limit; + + /* Latch error status. */ + afar = upa_readq(afar_reg); + + /* If either of the error pending bits are set in the + * AFSR, the error status is being actively updated by + * the hardware and we must re-read to get a clean value. + */ + limit = 1000; + do { + afsr = upa_readq(afsr_reg); + } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR | + SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA); + if (!error_bits) + return IRQ_NONE; + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: Correctable Error, primary error type[%s]\n", + pbm->name, + (((error_bits & SCHIZO_CEAFSR_PPIO) ? + "PIO" : + ((error_bits & SCHIZO_CEAFSR_PDRD) ? + "DMA Read" : + ((error_bits & SCHIZO_CEAFSR_PDWR) ? + "DMA Write" : "???"))))); + + /* XXX Use syndrome and afar to print out module string just like + * XXX UDB CE trap handler does... -DaveM + */ + printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", + pbm->name, + (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, + (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, + (afsr & SCHIZO_UEAFSR_AID) >> 24UL); + printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", + pbm->name, + (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, + (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 
1 : 0, + (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, + (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, + (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); + printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); + printk("%s: CE Secondary errors [", pbm->name); + reported = 0; + if (afsr & SCHIZO_CEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & SCHIZO_CEAFSR_SDMA) { + reported++; + printk("(DMA)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + return IRQ_HANDLED; +} + +#define SCHIZO_PCI_AFSR 0x2010UL +#define SCHIZO_PCI_AFAR 0x2018UL + +#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */ +#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */ +#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */ +#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */ + +#define SCHIZO_PCI_CTRL (0x2000UL) +#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ +#define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ +#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ +#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */ +#define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */ +#define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_PTO_SHIFT 24UL +#define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */ +#define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */ +#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */ +#define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */ +#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */ +#define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */ +#define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */ + +static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) +{ + unsigned 
long csr_reg, csr, csr_error_bits; + irqreturn_t ret = IRQ_NONE; + u32 stat; + + csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; + csr = upa_readq(csr_reg); + csr_error_bits = + csr & (SCHIZO_PCICTRL_BUS_UNUS | + SCHIZO_PCICTRL_TTO_ERR | + SCHIZO_PCICTRL_RTRY_ERR | + SCHIZO_PCICTRL_DTO_ERR | + SCHIZO_PCICTRL_SBH_ERR | + SCHIZO_PCICTRL_SERR); + if (csr_error_bits) { + /* Clear the errors. */ + upa_writeq(csr, csr_reg); + + /* Log 'em. */ + if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS) + printk("%s: Bus unusable error asserted.\n", + pbm->name); + if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR) + printk("%s: PCI TRDY# timeout error asserted.\n", + pbm->name); + if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR) + printk("%s: PCI excessive retry error asserted.\n", + pbm->name); + if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR) + printk("%s: PCI discard timeout error asserted.\n", + pbm->name); + if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR) + printk("%s: PCI streaming byte hole error asserted.\n", + pbm->name); + if (csr_error_bits & SCHIZO_PCICTRL_SERR) + printk("%s: PCI SERR signal asserted.\n", + pbm->name); + ret = IRQ_HANDLED; + } + pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); + if (stat & (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR)) { + printk("%s: PCI bus error, PCI_STATUS[%04x]\n", + pbm->name, stat); + pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); + ret = IRQ_HANDLED; + } + return ret; +} + +static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + unsigned long afsr_reg, afar_reg, base; + unsigned long afsr, afar, error_bits; + int reported; + + base = pbm->pbm_regs; + + afsr_reg = base + SCHIZO_PCI_AFSR; + afar_reg = base + SCHIZO_PCI_AFAR; + + /* Latch error status. */ + afar = upa_readq(afar_reg); + afsr = upa_readq(afsr_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | + SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | + SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | + SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | + SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | + SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS); + if (!error_bits) + return schizo_pcierr_intr_other(pbm); + upa_writeq(error_bits, afsr_reg); + + /* Log the error. */ + printk("%s: PCI Error, primary error type[%s]\n", + pbm->name, + (((error_bits & SCHIZO_PCIAFSR_PMA) ? + "Master Abort" : + ((error_bits & SCHIZO_PCIAFSR_PTA) ? + "Target Abort" : + ((error_bits & SCHIZO_PCIAFSR_PRTRY) ? + "Excessive Retries" : + ((error_bits & SCHIZO_PCIAFSR_PPERR) ? + "Parity Error" : + ((error_bits & SCHIZO_PCIAFSR_PTTO) ? + "Timeout" : + ((error_bits & SCHIZO_PCIAFSR_PUNUS) ? + "Bus Unusable" : "???")))))))); + printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n", + pbm->name, + (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL, + (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0, + ((afsr & SCHIZO_PCIAFSR_CFG) ? + "Config" : + ((afsr & SCHIZO_PCIAFSR_MEM) ? + "Memory" : + ((afsr & SCHIZO_PCIAFSR_IO) ? 
+ "I/O" : "???"))));
+ printk("%s: PCI AFAR [%016lx]\n",
+ pbm->name, afar);
+ printk("%s: PCI Secondary errors [",
+ pbm->name);
+ reported = 0;
+ if (afsr & SCHIZO_PCIAFSR_SMA) {
+ reported++;
+ printk("(Master Abort)");
+ }
+ if (afsr & SCHIZO_PCIAFSR_STA) {
+ reported++;
+ printk("(Target Abort)");
+ }
+ if (afsr & SCHIZO_PCIAFSR_SRTRY) {
+ reported++;
+ printk("(Excessive Retries)");
+ }
+ if (afsr & SCHIZO_PCIAFSR_SPERR) {
+ reported++;
+ printk("(Parity Error)");
+ }
+ if (afsr & SCHIZO_PCIAFSR_STTO) {
+ reported++;
+ printk("(Timeout)");
+ }
+ if (afsr & SCHIZO_PCIAFSR_SUNUS) {
+ reported++;
+ printk("(Bus Unusable)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ /* For the error types shown, scan the PBM's PCI bus for devices
+ * which have logged that error type.
+ */
+
+ /* If we see a Target Abort, this could be the result of an
+ * IOMMU translation error of some sort. It is extremely
+ * useful to log this information as usually it indicates
+ * a bug in the IOMMU support code or a PCI device driver.
+ */
+ if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
+ schizo_check_iommu_error(pbm, PCI_ERR);
+ pci_scan_for_target_abort(pbm, pbm->pci_bus);
+ }
+ if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
+ pci_scan_for_master_abort(pbm, pbm->pci_bus);
+
+ /* For excessive retries, the PBM will abort the device
+ * and there is no way to specifically check for excessive
+ * retries in the config space status registers. So what
+ * we hope is that we'll catch it via the master/target
+ * abort events.
+ */
+
+ if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
+ pci_scan_for_parity_error(pbm, pbm->pci_bus);
+
+ return IRQ_HANDLED;
+}
+
+#define SCHIZO_SAFARI_ERRLOG 0x10018UL
+
+#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
+
+#define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */
+#define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */
+#define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */
+#define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */
+#define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */
+#define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */
+#define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */
+#define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */
+#define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */
+#define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */
+#define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */
+#define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */
+#define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */
+#define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */
+#define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */
+#define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */
+#define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */
+#define BUS_ERROR_UNMAP
0x0000000000000010UL /* Safari/Tomatillo */
+#define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */
+#define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */
+#define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */
+
+/* We only expect UNMAP errors here. The rest of the Safari errors
+ * are marked fatal and thus cause a system reset.
+ */
+static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id)
+{
+ struct pci_pbm_info *pbm = dev_id;
+ u64 errlog;
+
+ errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
+ upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT),
+ pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
+
+ if (!(errlog & BUS_ERROR_UNMAP)) {
+ printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n",
+ pbm->name, errlog);
+
+ return IRQ_HANDLED;
+ }
+
+ printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
+ pbm->name);
+ schizo_check_iommu_error(pbm, SAFARI_ERR);
+
+ return IRQ_HANDLED;
+}
+
+/* Nearly identical to PSYCHO equivalents... */
+#define SCHIZO_ECC_CTRL 0x10020UL
+#define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
+#define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
+#define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */
+
+#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
+#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
+#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
+#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
+
+static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino)
+{
+ ino &= IMAP_INO;
+
+ if (pbm->ino_bitmap & (1UL << ino))
+ return 1;
+
+ return 0;
+}
+
+/* How the Tomatillo IRQs are routed around is pure guesswork here.
+ *
+ * All the Tomatillo devices I see in prtconf dumps seem to have only
+ * a single PCI bus unit attached to them. It would seem they are
+ * separate devices because their PortID (i.e. JBUS ID) values are all
+ * different and thus the registers are mapped to totally different
+ * locations.
+ *
+ * However, two Tomatillos look "similar" in that the only difference
+ * in their PortID is the lowest bit.
+ *
+ * So if we were to ignore this lower bit, it certainly looks like two
+ * PCI bus units of the same Tomatillo. I still have not really
+ * figured this out...
+ */
+static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
+{
+ struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
+ u64 tmp, err_mask, err_no_mask;
+ int err;
+
+ /* Tomatillo IRQ property layout is:
+ * 0: PCIERR
+ * 1: UE ERR
+ * 2: CE ERR
+ * 3: SERR
+ * 4: POWER FAIL?
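+ *
+ * Each registration below is gated on whether this PBM actually
+ * routes the INO in question; as a sketch (mirroring the real code
+ * that follows):
+ *
+ *	if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO))
+ *		err = request_irq(op->archdata.irqs[1], schizo_ue_intr,
+ *				  0, "TOMATILLO_UE", pbm);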
+ */ + + if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { + err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, + "TOMATILLO_UE", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register UE, " + "err=%d\n", pbm->name, err); + } + if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { + err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, + "TOMATILLO_CE", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register CE, " + "err=%d\n", pbm->name, err); + } + err = 0; + if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { + err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, + "TOMATILLO_PCIERR", pbm); + } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { + err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, + "TOMATILLO_PCIERR", pbm); + } + if (err) + printk(KERN_WARNING "%s: Could not register PCIERR, " + "err=%d\n", pbm->name, err); + + if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { + err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, + "TOMATILLO_SERR", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register SERR, " + "err=%d\n", pbm->name, err); + } + + /* Enable UE and CE interrupts for controller. */ + upa_writeq((SCHIZO_ECCCTRL_EE | + SCHIZO_ECCCTRL_UE | + SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); + + /* Enable PCI Error interrupts and clear error + * bits. + */ + err_mask = (SCHIZO_PCICTRL_BUS_UNUS | + SCHIZO_PCICTRL_TTO_ERR | + SCHIZO_PCICTRL_RTRY_ERR | + SCHIZO_PCICTRL_SERR | + SCHIZO_PCICTRL_EEN); + + err_no_mask = SCHIZO_PCICTRL_DTO_ERR; + + tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); + tmp |= err_mask; + tmp &= ~err_no_mask; + upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); + + err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | + SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | + SCHIZO_PCIAFSR_PTTO | + SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | + SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | + SCHIZO_PCIAFSR_STTO); + + upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR); + + err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR | + BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD | + BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA | + BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO | + BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR | + BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B | + BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR | + BUS_ERROR_APERR | BUS_ERROR_UNMAP | + BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT); + + upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), + pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); + + upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)), + pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL); +} + +static void schizo_register_error_handlers(struct pci_pbm_info *pbm) +{ + struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); + u64 tmp, err_mask, err_no_mask; + int err; + + /* Schizo IRQ property layout is: + * 0: PCIERR + * 1: UE ERR + * 2: CE ERR + * 3: SERR + * 4: POWER FAIL? 
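+ *
+ * (Same layout as the Tomatillo variant above; the index-to-handler
+ * wiring is identical, only the error-enable masks written
+ * afterwards differ.)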
+ */ + + if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { + err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, + "SCHIZO_UE", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register UE, " + "err=%d\n", pbm->name, err); + } + if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { + err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, + "SCHIZO_CE", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register CE, " + "err=%d\n", pbm->name, err); + } + err = 0; + if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { + err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, + "SCHIZO_PCIERR", pbm); + } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { + err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, + "SCHIZO_PCIERR", pbm); + } + if (err) + printk(KERN_WARNING "%s: Could not register PCIERR, " + "err=%d\n", pbm->name, err); + + if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { + err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, + "SCHIZO_SERR", pbm); + if (err) + printk(KERN_WARNING "%s: Could not register SERR, " + "err=%d\n", pbm->name, err); + } + + /* Enable UE and CE interrupts for controller. */ + upa_writeq((SCHIZO_ECCCTRL_EE | + SCHIZO_ECCCTRL_UE | + SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); + + err_mask = (SCHIZO_PCICTRL_BUS_UNUS | + SCHIZO_PCICTRL_ESLCK | + SCHIZO_PCICTRL_TTO_ERR | + SCHIZO_PCICTRL_RTRY_ERR | + SCHIZO_PCICTRL_SBH_ERR | + SCHIZO_PCICTRL_SERR | + SCHIZO_PCICTRL_EEN); + + err_no_mask = (SCHIZO_PCICTRL_DTO_ERR | + SCHIZO_PCICTRL_SBH_INT); + + /* Enable PCI Error interrupts and clear error + * bits for each PBM. + */ + tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); + tmp |= err_mask; + tmp &= ~err_no_mask; + upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); + + upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | + SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | + SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | + SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | + SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | + SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS), + pbm->pbm_regs + SCHIZO_PCI_AFSR); + + /* Make all Safari error conditions fatal except unmapped + * errors which we make generate interrupts. + */ + err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS | + BUS_ERROR_BADMA | BUS_ERROR_BADMB | + BUS_ERROR_BADMC | + BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | + BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB | + BUS_ERROR_CIQTO | + BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO | + BUS_ERROR_UFPQTO | BUS_ERROR_APERR | + BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT | + BUS_ERROR_ILL); +#if 1 + /* XXX Something wrong with some Excalibur systems + * XXX Sun is shipping. The behavior on a 2-cpu + * XXX machine is that both CPU1 parity error bits + * XXX are set and are immediately set again when + * XXX their error status bits are cleared. Just + * XXX ignore them for now. -DaveM + */ + err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | + BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB); +#endif + + upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), + pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); +} + +static void pbm_config_busmastering(struct pci_pbm_info *pbm) +{ + u8 *addr; + + /* Set cache-line size to 64 bytes, this is actually + * a nop but I do it for completeness. + */ + addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, + 0, PCI_CACHE_LINE_SIZE); + pci_config_write8(addr, 64 / sizeof(u32)); + + /* Set PBM latency timer to 64 PCI clocks. 
*/ + addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, + 0, PCI_LATENCY_TIMER); + pci_config_write8(addr, 64); +} + +static void schizo_scan_bus(struct pci_pbm_info *pbm, struct device *parent) +{ + pbm_config_busmastering(pbm); + pbm->is_66mhz_capable = + (of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL) + != NULL); + + pbm->pci_bus = pci_scan_one_pbm(pbm, parent); + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) + tomatillo_register_error_handlers(pbm); + else + schizo_register_error_handlers(pbm); +} + +#define SCHIZO_STRBUF_CONTROL (0x02800UL) +#define SCHIZO_STRBUF_FLUSH (0x02808UL) +#define SCHIZO_STRBUF_FSYNC (0x02810UL) +#define SCHIZO_STRBUF_CTXFLUSH (0x02818UL) +#define SCHIZO_STRBUF_CTXMATCH (0x10000UL) + +static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) +{ + unsigned long base = pbm->pbm_regs; + u64 control; + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { + /* TOMATILLO lacks streaming cache. */ + return; + } + + /* SCHIZO has context flushing. */ + pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL; + pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH; + pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC; + pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH; + pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH; + + pbm->stc.strbuf_flushflag = (volatile unsigned long *) + ((((unsigned long)&pbm->stc.__flushflag_buf[0]) + + 63UL) + & ~63UL); + pbm->stc.strbuf_flushflag_pa = (unsigned long) + __pa(pbm->stc.strbuf_flushflag); + + /* Turn off LRU locking and diag mode, enable the + * streaming buffer and leave the rerun-disable + * setting however OBP set it. + */ + control = upa_readq(pbm->stc.strbuf_control); + control &= ~(SCHIZO_STRBUF_CTRL_LPTR | + SCHIZO_STRBUF_CTRL_LENAB | + SCHIZO_STRBUF_CTRL_DENAB); + control |= SCHIZO_STRBUF_CTRL_ENAB; + upa_writeq(control, pbm->stc.strbuf_control); + + pbm->stc.strbuf_enabled = 1; +} + +#define SCHIZO_IOMMU_CONTROL (0x00200UL) +#define SCHIZO_IOMMU_TSBBASE (0x00208UL) +#define SCHIZO_IOMMU_FLUSH (0x00210UL) +#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL) + +static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm) +{ + static const u32 vdma_default[] = { 0xc0000000, 0x40000000 }; + unsigned long i, tagbase, database; + struct iommu *iommu = pbm->iommu; + int tsbsize, err; + const u32 *vdma; + u32 dma_mask; + u64 control; + + vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); + if (!vdma) + vdma = vdma_default; + + dma_mask = vdma[0]; + switch (vdma[1]) { + case 0x20000000: + dma_mask |= 0x1fffffff; + tsbsize = 64; + break; + + case 0x40000000: + dma_mask |= 0x3fffffff; + tsbsize = 128; + break; + + case 0x80000000: + dma_mask |= 0x7fffffff; + tsbsize = 128; + break; + + default: + printk(KERN_ERR PFX "Strange virtual-dma size.\n"); + return -EINVAL; + } + + /* Register addresses, SCHIZO has iommu ctx flushing. */ + iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; + iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; + iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH; + iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL); + iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH; + + /* We use the main control/status register of SCHIZO as the write + * completion register. + */ + iommu->write_complete_reg = pbm->controller_regs + 0x10000UL; + + /* + * Invalidate TLB Entries. 
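+ *
+ * (Worked example) the tsbsize values chosen above are in units of
+ * 1024 entries: a virtual-dma window of vdma[1] bytes needs
+ * vdma[1] / IO_PAGE_SIZE eight-byte IOTTEs, e.g.
+ *
+ *	0x40000000 / 8192 == 131072 == 128 * 1024
+ *
+ * which matches the SCHIZO_IOMMU_TSBSZ_128K setting programmed
+ * further down; the 0x80000000 window is capped at the hardware
+ * maximum of 128K entries.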
+ */ + control = upa_readq(iommu->iommu_control); + control |= SCHIZO_IOMMU_CTRL_DENAB; + upa_writeq(control, iommu->iommu_control); + + tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA; + + for (i = 0; i < 16; i++) { + upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL)); + upa_writeq(0, pbm->pbm_regs + database + (i * 8UL)); + } + + /* Leave diag mode enabled for full-flushing done + * in pci_iommu.c + */ + err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask, + pbm->numa_node); + if (err) { + printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err); + return err; + } + + upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); + + control = upa_readq(iommu->iommu_control); + control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); + switch (tsbsize) { + case 64: + control |= SCHIZO_IOMMU_TSBSZ_64K; + break; + case 128: + control |= SCHIZO_IOMMU_TSBSZ_128K; + break; + } + + control |= SCHIZO_IOMMU_CTRL_ENAB; + upa_writeq(control, iommu->iommu_control); + + return 0; +} + +#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL) +#define SCHIZO_IRQ_RETRY_INF 0xffUL + +#define SCHIZO_PCI_DIAG (0x2020UL) +#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */ +#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */ +#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */ +#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */ + +#define TOMATILLO_PCI_IOC_CSR (0x2248UL) +#define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL +#define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL +#define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL +#define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL +#define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL +#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL +#define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL +#define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL +#define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL +#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL +#define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL +#define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL +#define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL +#define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL +#define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL + +#define TOMATILLO_PCI_IOC_TDIAG (0x2250UL) +#define TOMATILLO_PCI_IOC_DDIAG (0x2290UL) + +static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) +{ + u64 tmp; + + upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY); + + tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); + + /* Enable arbiter for all PCI slots. 
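+ *
+ * The low bits of the PCI control register form the arbiter enable
+ * mask: SCHIZO_PCICTRL_ARB_S (0x3f) covers Schizo's six requesters
+ * and SCHIZO_PCICTRL_ARB_T (0xff) Tomatillo's eight, so OR-ing in
+ * 0xff below enables every slot on either chip.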
*/ + tmp |= 0xff; + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && + pbm->chip_version >= 0x2) + tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; + + if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL)) + tmp |= SCHIZO_PCICTRL_PARK; + else + tmp &= ~SCHIZO_PCICTRL_PARK; + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && + pbm->chip_version <= 0x1) + tmp |= SCHIZO_PCICTRL_DTO_INT; + else + tmp &= ~SCHIZO_PCICTRL_DTO_INT; + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) + tmp |= (SCHIZO_PCICTRL_MRM_PREF | + SCHIZO_PCICTRL_RDO_PREF | + SCHIZO_PCICTRL_RDL_PREF); + + upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); + + tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG); + tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB | + SCHIZO_PCIDIAG_D_RETRY | + SCHIZO_PCIDIAG_D_INTSYNC); + upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG); + + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { + /* Clear prefetch lengths to workaround a bug in + * Jalapeno... + */ + tmp = (TOMATILLO_IOC_PART_WPENAB | + (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) | + TOMATILLO_IOC_RDMULT_CPENAB | + TOMATILLO_IOC_RDONE_CPENAB | + TOMATILLO_IOC_RDLINE_CPENAB); + + upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR); + } +} + +static int schizo_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 portid, + int chip_type) +{ + const struct linux_prom64_registers *regs; + struct device_node *dp = op->dev.of_node; + const char *chipset_name; + int err; + + switch (chip_type) { + case PBM_CHIP_TYPE_TOMATILLO: + chipset_name = "TOMATILLO"; + break; + + case PBM_CHIP_TYPE_SCHIZO_PLUS: + chipset_name = "SCHIZO+"; + break; + + case PBM_CHIP_TYPE_SCHIZO: + default: + chipset_name = "SCHIZO"; + break; + } + + /* For SCHIZO, three OBP regs: + * 1) PBM controller regs + * 2) Schizo front-end controller regs (same for both PBMs) + * 3) PBM PCI config space + * + * For TOMATILLO, four OBP regs: + * 1) PBM controller regs + * 2) Tomatillo front-end controller regs + * 3) PBM PCI config space + * 4) Ichip regs + */ + regs = of_get_property(dp, "reg", NULL); + + pbm->next = pci_pbm_root; + pci_pbm_root = pbm; + + pbm->numa_node = NUMA_NO_NODE; + + pbm->pci_ops = &sun4u_pci_ops; + pbm->config_space_reg_bits = 8; + + pbm->index = pci_num_pbms++; + + pbm->portid = portid; + pbm->op = op; + + pbm->chip_type = chip_type; + pbm->chip_version = of_getintprop_default(dp, "version#", 0); + pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0); + + pbm->pbm_regs = regs[0].phys_addr; + pbm->controller_regs = regs[1].phys_addr - 0x10000UL; + + if (chip_type == PBM_CHIP_TYPE_TOMATILLO) + pbm->sync_reg = regs[3].phys_addr + 0x1a18UL; + + pbm->name = dp->full_name; + + printk("%s: %s PCI Bus Module ver[%x:%x]\n", + pbm->name, chipset_name, + pbm->chip_version, pbm->chip_revision); + + schizo_pbm_hw_init(pbm); + + pci_determine_mem_io_space(pbm); + + pci_get_pbm_props(pbm); + + err = schizo_pbm_iommu_init(pbm); + if (err) + return err; + + schizo_pbm_strbuf_init(pbm); + + schizo_scan_bus(pbm, &op->dev); + + return 0; +} + +static inline int portid_compare(u32 x, u32 y, int chip_type) +{ + if (chip_type == PBM_CHIP_TYPE_TOMATILLO) { + if (x == (y ^ 1)) + return 1; + return 0; + } + return (x == y); +} + +static struct pci_pbm_info *schizo_find_sibling(u32 portid, int chip_type) +{ + struct pci_pbm_info *pbm; + + for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { + if (portid_compare(pbm->portid, portid, chip_type)) + return pbm; + } + return NULL; +} + +static int __schizo_init(struct platform_device *op, unsigned long chip_type) +{ + 
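/* Pair the two PBMs of one chip via schizo_find_sibling(), then
+ * let schizo_pbm_init() do the register setup, IOMMU and
+ * streaming-cache init, and the bus scan.
+ */
+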
struct device_node *dp = op->dev.of_node; + struct pci_pbm_info *pbm; + struct iommu *iommu; + u32 portid; + int err; + + portid = of_getintprop_default(dp, "portid", 0xff); + + err = -ENOMEM; + pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); + if (!pbm) { + printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); + goto out_err; + } + + pbm->sibling = schizo_find_sibling(portid, chip_type); + + iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); + if (!iommu) { + printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n"); + goto out_free_pbm; + } + + pbm->iommu = iommu; + + if (schizo_pbm_init(pbm, op, portid, chip_type)) + goto out_free_iommu; + + if (pbm->sibling) + pbm->sibling->sibling = pbm; + + dev_set_drvdata(&op->dev, pbm); + + return 0; + +out_free_iommu: + kfree(pbm->iommu); + +out_free_pbm: + kfree(pbm); + +out_err: + return err; +} + +static const struct of_device_id schizo_match[]; +static int schizo_probe(struct platform_device *op) +{ + const struct of_device_id *match; + + match = of_match_device(schizo_match, &op->dev); + if (!match) + return -EINVAL; + return __schizo_init(op, (unsigned long)match->data); +} + +/* The ordering of this table is very important. Some Tomatillo + * nodes announce that they are compatible with both pci108e,a801 + * and pci108e,8001. So list the chips in reverse chronological + * order. + */ +static const struct of_device_id schizo_match[] = { + { + .name = "pci", + .compatible = "pci108e,a801", + .data = (void *) PBM_CHIP_TYPE_TOMATILLO, + }, + { + .name = "pci", + .compatible = "pci108e,8002", + .data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS, + }, + { + .name = "pci", + .compatible = "pci108e,8001", + .data = (void *) PBM_CHIP_TYPE_SCHIZO, + }, + {}, +}; + +static struct platform_driver schizo_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = schizo_match, + }, + .probe = schizo_probe, +}; + +static int __init schizo_init(void) +{ + return platform_driver_register(&schizo_driver); +} + +subsys_initcall(schizo_init); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c new file mode 100644 index 000000000..384480971 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v.c @@ -0,0 +1,1349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pci_sun4v.c: SUN4V specific PCI controller support. + * + * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci_impl.h" +#include "iommu_common.h" +#include "kernel.h" + +#include "pci_sun4v.h" + +#define DRIVER_NAME "pci_sun4v" +#define PFX DRIVER_NAME ": " + +static unsigned long vpci_major; +static unsigned long vpci_minor; + +struct vpci_version { + unsigned long major; + unsigned long minor; +}; + +/* Ordered from largest major to lowest */ +static struct vpci_version vpci_versions[] = { + { .major = 2, .minor = 0 }, + { .major = 1, .minor = 1 }, +}; + +static unsigned long vatu_major = 1; +static unsigned long vatu_minor = 1; + +#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) + +struct iommu_batch { + struct device *dev; /* Device mapping is for. */ + unsigned long prot; /* IOMMU page protections */ + unsigned long entry; /* Index into IOTSB. */ + u64 *pglist; /* List of physical pages */ + unsigned long npages; /* Number of pages in list. */ +}; + +static DEFINE_PER_CPU(struct iommu_batch, iommu_batch); +static int iommu_batch_initialized; + +/* Interrupts must be disabled. 
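+ *
+ * Typical calling sequence, as a sketch (mirroring
+ * dma_4v_alloc_coherent() below):
+ *
+ *	local_irq_save(flags);
+ *	iommu_batch_start(dev, prot, entry);
+ *	for (n = 0; n < npages; n++)
+ *		if (iommu_batch_add(pa + n * PAGE_SIZE, mask) < 0)
+ *			goto fail;
+ *	if (iommu_batch_end(mask) < 0)
+ *		goto fail;
+ *	local_irq_restore(flags);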
*/ +static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) +{ + struct iommu_batch *p = this_cpu_ptr(&iommu_batch); + + p->dev = dev; + p->prot = prot; + p->entry = entry; + p->npages = 0; +} + +static inline bool iommu_use_atu(struct iommu *iommu, u64 mask) +{ + return iommu->atu && mask > DMA_BIT_MASK(32); +} + +/* Interrupts must be disabled. */ +static long iommu_batch_flush(struct iommu_batch *p, u64 mask) +{ + struct pci_pbm_info *pbm = p->dev->archdata.host_controller; + u64 *pglist = p->pglist; + u64 index_count; + unsigned long devhandle = pbm->devhandle; + unsigned long prot = p->prot; + unsigned long entry = p->entry; + unsigned long npages = p->npages; + unsigned long iotsb_num; + unsigned long ret; + long num; + + /* VPCI maj=1, min=[0,1] only supports read and write */ + if (vpci_major < 2) + prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); + + while (npages != 0) { + if (!iommu_use_atu(pbm->iommu, mask)) { + num = pci_sun4v_iommu_map(devhandle, + HV_PCI_TSBID(0, entry), + npages, + prot, + __pa(pglist)); + if (unlikely(num < 0)) { + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, + HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist), + num); + return -1; + } + } else { + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; + ret = pci_sun4v_iotsb_map(devhandle, + iotsb_num, + index_count, + prot, + __pa(pglist), + &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, iotsb_num, + index_count, prot, + __pa(pglist), ret); + return -1; + } + } + entry += num; + npages -= num; + pglist += num; + } + + p->entry = entry; + p->npages = 0; + + return 0; +} + +static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) +{ + struct iommu_batch *p = this_cpu_ptr(&iommu_batch); + + if (p->entry + p->npages == entry) + return; + if (p->entry != ~0UL) + iommu_batch_flush(p, mask); + p->entry = entry; +} + +/* Interrupts must be disabled. */ +static inline long iommu_batch_add(u64 phys_page, u64 mask) +{ + struct iommu_batch *p = this_cpu_ptr(&iommu_batch); + + BUG_ON(p->npages >= PGLIST_NENTS); + + p->pglist[p->npages++] = phys_page; + if (p->npages == PGLIST_NENTS) + return iommu_batch_flush(p, mask); + + return 0; +} + +/* Interrupts must be disabled. 
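+ *
+ * iommu_batch_end() just flushes whatever is still queued in the
+ * per-cpu page list via iommu_batch_flush().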
*/ +static inline long iommu_batch_end(u64 mask) +{ + struct iommu_batch *p = this_cpu_ptr(&iommu_batch); + + BUG_ON(p->npages >= PGLIST_NENTS); + + return iommu_batch_flush(p, mask); +} + +static void *dma_4v_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addrp, gfp_t gfp, + unsigned long attrs) +{ + u64 mask; + unsigned long flags, order, first_page, npages, n; + unsigned long prot = 0; + struct iommu *iommu; + struct iommu_map_table *tbl; + struct page *page; + void *ret; + long entry; + int nid; + + size = IO_PAGE_ALIGN(size); + order = get_order(size); + if (unlikely(order >= MAX_ORDER)) + return NULL; + + npages = size >> IO_PAGE_SHIFT; + + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot = HV_PCI_MAP_ATTR_RELAXED_ORDER; + + nid = dev->archdata.numa_node; + page = alloc_pages_node(nid, gfp, order); + if (unlikely(!page)) + return NULL; + + first_page = (unsigned long) page_address(page); + memset((char *)first_page, 0, PAGE_SIZE << order); + + iommu = dev->archdata.iommu; + mask = dev->coherent_dma_mask; + if (!iommu_use_atu(iommu, mask)) + tbl = &iommu->tbl; + else + tbl = &iommu->atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, + (unsigned long)(-1), 0); + + if (unlikely(entry == IOMMU_ERROR_CODE)) + goto range_alloc_fail; + + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); + ret = (void *) first_page; + first_page = __pa(first_page); + + local_irq_save(flags); + + iommu_batch_start(dev, + (HV_PCI_MAP_ATTR_READ | prot | + HV_PCI_MAP_ATTR_WRITE), + entry); + + for (n = 0; n < npages; n++) { + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); + if (unlikely(err < 0L)) + goto iommu_map_fail; + } + + if (unlikely(iommu_batch_end(mask) < 0L)) + goto iommu_map_fail; + + local_irq_restore(flags); + + return ret; + +iommu_map_fail: + local_irq_restore(flags); + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); + +range_alloc_fail: + free_pages(first_page, order); + return NULL; +} + +unsigned long dma_4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + struct pci_bus *bus_dev) +{ + struct pci_dev *pdev; + unsigned long err; + unsigned int bus; + unsigned int device; + unsigned int fun; + + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { + if (pdev->subordinate) { + /* No need to bind pci bridge */ + dma_4v_iotsb_bind(devhandle, iotsb_num, + pdev->subordinate); + } else { + bus = bus_dev->number; + device = PCI_SLOT(pdev->devfn); + fun = PCI_FUNC(pdev->devfn); + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, + HV_PCI_DEVICE_BUILD(bus, + device, + fun)); + + /* If bind fails for one device it is going to fail + * for rest of the devices because we are sharing + * IOTSB. So in case of failure simply return with + * error. 
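+ *
+ * The top-level call binds an entire root bus; as used by
+ * pci_sun4v_atu_alloc_iotsb() below:
+ *
+ *	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num,
+ *				pbm->pci_bus);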
+ */ + if (err) + return err; + } + } + + return 0; +} + +static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, + dma_addr_t dvma, unsigned long iotsb_num, + unsigned long entry, unsigned long npages) +{ + unsigned long num, flags; + unsigned long ret; + + local_irq_save(flags); + do { + if (dvma <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_demap(devhandle, + HV_PCI_TSBID(0, entry), + npages); + } else { + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, + entry, npages, &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", + ret); + } + } + entry += num; + npages -= num; + } while (npages != 0); + local_irq_restore(flags); +} + +static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, + dma_addr_t dvma, unsigned long attrs) +{ + struct pci_pbm_info *pbm; + struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + unsigned long order, npages, entry; + unsigned long iotsb_num; + u32 devhandle; + + npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; + iommu = dev->archdata.iommu; + pbm = dev->archdata.host_controller; + atu = iommu->atu; + devhandle = pbm->devhandle; + + if (!iommu_use_atu(iommu, dvma)) { + tbl = &iommu->tbl; + iotsb_num = 0; /* we don't care for legacy iommu */ + } else { + tbl = &atu->tbl; + iotsb_num = atu->iotsb->iotsb_num; + } + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); + order = get_order(size); + if (order < 10) + free_pages((unsigned long)cpu, order); +} + +static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t sz, + enum dma_data_direction direction, + unsigned long attrs) +{ + struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; + unsigned long flags, npages, oaddr; + unsigned long i, base_paddr; + unsigned long prot; + dma_addr_t bus_addr, ret; + long entry; + + iommu = dev->archdata.iommu; + atu = iommu->atu; + + if (unlikely(direction == DMA_NONE)) + goto bad; + + oaddr = (unsigned long)(page_address(page) + offset); + npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + + mask = *dev->dma_mask; + if (!iommu_use_atu(iommu, mask)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, + (unsigned long)(-1), 0); + + if (unlikely(entry == IOMMU_ERROR_CODE)) + goto bad; + + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); + ret = bus_addr | (oaddr & ~IO_PAGE_MASK); + base_paddr = __pa(oaddr & IO_PAGE_MASK); + prot = HV_PCI_MAP_ATTR_READ; + if (direction != DMA_TO_DEVICE) + prot |= HV_PCI_MAP_ATTR_WRITE; + + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER; + + local_irq_save(flags); + + iommu_batch_start(dev, prot, entry); + + for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { + long err = iommu_batch_add(base_paddr, mask); + if (unlikely(err < 0L)) + goto iommu_map_fail; + } + if (unlikely(iommu_batch_end(mask) < 0L)) + goto iommu_map_fail; + + local_irq_restore(flags); + + return ret; + +bad: + if (printk_ratelimit()) + WARN_ON(1); + return DMA_MAPPING_ERROR; + +iommu_map_fail: + local_irq_restore(flags); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); + return DMA_MAPPING_ERROR; +} + +static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, + size_t sz, enum 
dma_data_direction direction, + unsigned long attrs) +{ + struct pci_pbm_info *pbm; + struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + unsigned long npages; + unsigned long iotsb_num; + long entry; + u32 devhandle; + + if (unlikely(direction == DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + return; + } + + iommu = dev->archdata.iommu; + pbm = dev->archdata.host_controller; + atu = iommu->atu; + devhandle = pbm->devhandle; + + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + bus_addr &= IO_PAGE_MASK; + + if (bus_addr <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); +} + +static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction, + unsigned long attrs) +{ + struct scatterlist *s, *outs, *segstart; + unsigned long flags, handle, prot; + dma_addr_t dma_next = 0, dma_addr; + unsigned int max_seg_size; + unsigned long seg_boundary_size; + int outcount, incount, i; + struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; + unsigned long base_shift; + long err; + + BUG_ON(direction == DMA_NONE); + + iommu = dev->archdata.iommu; + if (nelems == 0 || !iommu) + return -EINVAL; + atu = iommu->atu; + + prot = HV_PCI_MAP_ATTR_READ; + if (direction != DMA_TO_DEVICE) + prot |= HV_PCI_MAP_ATTR_WRITE; + + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER; + + outs = s = segstart = &sglist[0]; + outcount = 1; + incount = nelems; + handle = 0; + + /* Init first segment length for backout at failure */ + outs->dma_length = 0; + + local_irq_save(flags); + + iommu_batch_start(dev, prot, ~0UL); + + max_seg_size = dma_get_max_seg_size(dev); + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); + + mask = *dev->dma_mask; + if (!iommu_use_atu(iommu, mask)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; + + for_each_sg(sglist, s, nelems, i) { + unsigned long paddr, npages, entry, out_entry = 0, slen; + + slen = s->length; + /* Sanity check */ + if (slen == 0) { + dma_next = 0; + continue; + } + /* Allocate iommu entries for that segment */ + paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); + npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); + entry = iommu_tbl_range_alloc(dev, tbl, npages, + &handle, (unsigned long)(-1), 0); + + /* Handle failure */ + if (unlikely(entry == IOMMU_ERROR_CODE)) { + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", + tbl, paddr, npages); + goto iommu_map_failed; + } + + iommu_batch_new_entry(entry, mask); + + /* Convert entry to a dma_addr_t */ + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); + dma_addr |= (s->offset & ~IO_PAGE_MASK); + + /* Insert into HW table */ + paddr &= IO_PAGE_MASK; + while (npages--) { + err = iommu_batch_add(paddr, mask); + if (unlikely(err < 0L)) + goto iommu_map_failed; + paddr += IO_PAGE_SIZE; + } + + /* If we are in an open segment, try merging */ + if (segstart != s) { + /* We cannot merge if: + * - allocated dma_addr isn't contiguous to previous allocation + */ + if ((dma_addr != dma_next) || + (outs->dma_length + s->length > max_seg_size) 
|| + (is_span_boundary(out_entry, base_shift, + seg_boundary_size, outs, s))) { + /* Can't merge: create a new segment */ + segstart = s; + outcount++; + outs = sg_next(outs); + } else { + outs->dma_length += s->length; + } + } + + if (segstart == s) { + /* This is a new segment, fill entries */ + outs->dma_address = dma_addr; + outs->dma_length = slen; + out_entry = entry; + } + + /* Calculate next page pointer for contiguous check */ + dma_next = dma_addr + slen; + } + + err = iommu_batch_end(mask); + + if (unlikely(err < 0L)) + goto iommu_map_failed; + + local_irq_restore(flags); + + if (outcount < incount) { + outs = sg_next(outs); + outs->dma_length = 0; + } + + return outcount; + +iommu_map_failed: + for_each_sg(sglist, s, nelems, i) { + if (s->dma_length != 0) { + unsigned long vaddr, npages; + + vaddr = s->dma_address & IO_PAGE_MASK; + npages = iommu_num_pages(s->dma_address, s->dma_length, + IO_PAGE_SIZE); + iommu_tbl_range_free(tbl, vaddr, npages, + IOMMU_ERROR_CODE); + /* XXX demap? XXX */ + s->dma_length = 0; + } + if (s == outs) + break; + } + local_irq_restore(flags); + + return -EINVAL; +} + +static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction, + unsigned long attrs) +{ + struct pci_pbm_info *pbm; + struct scatterlist *sg; + struct iommu *iommu; + struct atu *atu; + unsigned long flags, entry; + unsigned long iotsb_num; + u32 devhandle; + + BUG_ON(direction == DMA_NONE); + + iommu = dev->archdata.iommu; + pbm = dev->archdata.host_controller; + atu = iommu->atu; + devhandle = pbm->devhandle; + + local_irq_save(flags); + + sg = sglist; + while (nelems--) { + dma_addr_t dma_handle = sg->dma_address; + unsigned int len = sg->dma_length; + unsigned long npages; + struct iommu_map_table *tbl; + unsigned long shift = IO_PAGE_SHIFT; + + if (!len) + break; + npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); + + if (dma_handle <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } + entry = ((dma_handle - tbl->table_map_base) >> shift); + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, + entry, npages); + iommu_tbl_range_free(tbl, dma_handle, npages, + IOMMU_ERROR_CODE); + sg = sg_next(sg); + } + + local_irq_restore(flags); +} + +static int dma_4v_supported(struct device *dev, u64 device_mask) +{ + struct iommu *iommu = dev->archdata.iommu; + + if (ali_sound_dma_hack(dev, device_mask)) + return 1; + if (device_mask < iommu->dma_addr_mask) + return 0; + return 1; +} + +static const struct dma_map_ops sun4v_dma_ops = { + .alloc = dma_4v_alloc_coherent, + .free = dma_4v_free_coherent, + .map_page = dma_4v_map_page, + .unmap_page = dma_4v_unmap_page, + .map_sg = dma_4v_map_sg, + .unmap_sg = dma_4v_unmap_sg, + .dma_supported = dma_4v_supported, +}; + +static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent) +{ + struct property *prop; + struct device_node *dp; + + dp = pbm->op->dev.of_node; + prop = of_find_property(dp, "66mhz-capable", NULL); + pbm->is_66mhz_capable = (prop != NULL); + pbm->pci_bus = pci_scan_one_pbm(pbm, parent); + + /* XXX register error interrupt handlers XXX */ +} + +static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, + struct iommu_map_table *iommu) +{ + struct iommu_pool *pool; + unsigned long i, pool_nr, cnt = 0; + u32 devhandle; + + devhandle = pbm->devhandle; + for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) { + 
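/* For each IOTTE the hypervisor reports as mapped: demap it if
+ * its RA is ordinary free memory, otherwise preserve the
+ * OBP-established mapping by marking the entry allocated in the
+ * map.
+ */
+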
pool = &(iommu->pools[pool_nr]); + for (i = pool->start; i <= pool->end; i++) { + unsigned long ret, io_attrs, ra; + + ret = pci_sun4v_iommu_getmap(devhandle, + HV_PCI_TSBID(0, i), + &io_attrs, &ra); + if (ret == HV_EOK) { + if (page_in_phys_avail(ra)) { + pci_sun4v_iommu_demap(devhandle, + HV_PCI_TSBID(0, + i), 1); + } else { + cnt++; + __set_bit(i, iommu->map); + } + } + } + } + return cnt; +} + +static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + struct atu_iotsb *iotsb; + void *table; + u64 table_size; + u64 iotsb_num; + unsigned long order; + unsigned long err; + + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); + if (!iotsb) { + err = -ENOMEM; + goto out_err; + } + atu->iotsb = iotsb; + + /* calculate size of IOTSB */ + table_size = (atu->size / IO_PAGE_SIZE) * 8; + order = get_order(table_size); + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!table) { + err = -ENOMEM; + goto table_failed; + } + iotsb->table = table; + iotsb->ra = __pa(table); + iotsb->dvma_size = atu->size; + iotsb->dvma_base = atu->base; + iotsb->table_size = table_size; + iotsb->page_size = IO_PAGE_SIZE; + + /* configure and register IOTSB with HV */ + err = pci_sun4v_iotsb_conf(pbm->devhandle, + iotsb->ra, + iotsb->table_size, + iotsb->page_size, + iotsb->dvma_base, + &iotsb_num); + if (err) { + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); + goto iotsb_conf_failed; + } + iotsb->iotsb_num = iotsb_num; + + err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); + if (err) { + pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); + goto iotsb_conf_failed; + } + + return 0; + +iotsb_conf_failed: + free_pages((unsigned long)table, order); +table_failed: + kfree(iotsb); +out_err: + return err; +} + +static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + unsigned long err; + const u64 *ranges; + u64 map_size, num_iotte; + u64 dma_mask; + const u32 *page_size; + int len; + + ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", + &len); + if (!ranges) { + pr_err(PFX "No iommu-address-ranges\n"); + return -EINVAL; + } + + page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", + NULL); + if (!page_size) { + pr_err(PFX "No iommu-pagesizes\n"); + return -EINVAL; + } + + /* There are 4 supported iommu-address-ranges. Each range is a pair + * of {base, size}. ranges[0] and ranges[1] are 32bit address space + * while ranges[2] and ranges[3] are 64bit space. We want to use the + * 64bit address ranges to support 64bit addressing. Because the + * 'size' of ranges[2] and ranges[3] is the same, we can select + * either of them for mapping. However, since that 'size' is too + * large for the OS to allocate an IOTSB for, we use a fixed size of + * 32G (ATU_64_SPACE_SIZE), which is more than enough for all PCIe + * devices to share. + */ + atu->ranges = (struct atu_ranges *)ranges; + atu->base = atu->ranges[3].base; + atu->size = ATU_64_SPACE_SIZE; + + /* Create IOTSB */ + err = pci_sun4v_atu_alloc_iotsb(pbm); + if (err) { + pr_err(PFX "Error creating ATU IOTSB\n"); + return err; + } + + /* Create the ATU iommu map. + * One bit represents one iotte in the IOTSB table.
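+ * + * A sketch of the sizing, assuming the 8K IO page size used here: + * with atu->size = 32G, num_iotte = 32G / 8K = 4M, so the bitmap is + * map_size = 4M / 8 = 512K bytes, while the IOTSB itself (8 bytes + * per iotte) is 32M.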
+ */ + dma_mask = (roundup_pow_of_two(atu->size) - 1UL); + num_iotte = atu->size / IO_PAGE_SIZE; + map_size = num_iotte / 8; + atu->tbl.table_map_base = atu->base; + atu->dma_addr_mask = dma_mask; + atu->tbl.map = kzalloc(map_size, GFP_KERNEL); + if (!atu->tbl.map) + return -ENOMEM; + + iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, + NULL, false /* no large_pool */, + 0 /* default npools */, + false /* want span boundary checking */); + + return 0; +} + +static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) +{ + static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; + struct iommu *iommu = pbm->iommu; + unsigned long num_tsb_entries, sz; + u32 dma_mask, dma_offset; + const u32 *vdma; + + vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); + if (!vdma) + vdma = vdma_default; + + if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) { + printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n", + vdma[0], vdma[1]); + return -EINVAL; + } + + dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); + num_tsb_entries = vdma[1] / IO_PAGE_SIZE; + + dma_offset = vdma[0]; + + /* Setup initial software IOMMU state. */ + spin_lock_init(&iommu->lock); + iommu->ctx_lowest_free = 1; + iommu->tbl.table_map_base = dma_offset; + iommu->dma_addr_mask = dma_mask; + + /* Allocate and initialize the free area map. */ + sz = (num_tsb_entries + 7) / 8; + sz = (sz + 7UL) & ~7UL; + iommu->tbl.map = kzalloc(sz, GFP_KERNEL); + if (!iommu->tbl.map) { + printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n"); + return -ENOMEM; + } + iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, + NULL, false /* no large_pool */, + 0 /* default npools */, + false /* want span boundary checking */); + sz = probe_existing_entries(pbm, &iommu->tbl); + if (sz) + printk("%s: Imported %lu TSB entries from OBP\n", + pbm->name, sz); + + return 0; +} + +#ifdef CONFIG_PCI_MSI +struct pci_sun4v_msiq_entry { + u64 version_type; +#define MSIQ_VERSION_MASK 0xffffffff00000000UL +#define MSIQ_VERSION_SHIFT 32 +#define MSIQ_TYPE_MASK 0x00000000000000ffUL +#define MSIQ_TYPE_SHIFT 0 +#define MSIQ_TYPE_NONE 0x00 +#define MSIQ_TYPE_MSG 0x01 +#define MSIQ_TYPE_MSI32 0x02 +#define MSIQ_TYPE_MSI64 0x03 +#define MSIQ_TYPE_INTX 0x08 +#define MSIQ_TYPE_NONE2 0xff + + u64 intx_sysino; + u64 reserved1; + u64 stick; + u64 req_id; /* bus/device/func */ +#define MSIQ_REQID_BUS_MASK 0xff00UL +#define MSIQ_REQID_BUS_SHIFT 8 +#define MSIQ_REQID_DEVICE_MASK 0x00f8UL +#define MSIQ_REQID_DEVICE_SHIFT 3 +#define MSIQ_REQID_FUNC_MASK 0x0007UL +#define MSIQ_REQID_FUNC_SHIFT 0 + + u64 msi_address; + + /* The format of this value is message type dependent. + * For MSI bits 15:0 are the data from the MSI packet. + * For MSI-X bits 31:0 are the data from the MSI packet. 
+ * For MSG, the message code and message routing code where: + * bits 39:32 is the bus/device/fn of the msg target-id + * bits 18:16 is the message routing code + * bits 7:0 is the message code + * For INTx the low order 2-bits are: + * 00 - INTA + * 01 - INTB + * 10 - INTC + * 11 - INTD + */ + u64 msi_data; + + u64 reserved2; +}; + +static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long *head) +{ + unsigned long err, limit; + + err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head); + if (unlikely(err)) + return -ENXIO; + + limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); + if (unlikely(*head >= limit)) + return -EFBIG; + + return 0; +} + +static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm, + unsigned long msiqid, unsigned long *head, + unsigned long *msi) +{ + struct pci_sun4v_msiq_entry *ep; + unsigned long err, type; + + /* Note: void pointer arithmetic, 'head' is a byte offset */ + ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * + (pbm->msiq_ent_count * + sizeof(struct pci_sun4v_msiq_entry))) + + *head); + + if ((ep->version_type & MSIQ_TYPE_MASK) == 0) + return 0; + + type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT; + if (unlikely(type != MSIQ_TYPE_MSI32 && + type != MSIQ_TYPE_MSI64)) + return -EINVAL; + + *msi = ep->msi_data; + + err = pci_sun4v_msi_setstate(pbm->devhandle, + ep->msi_data /* msi_num */, + HV_MSISTATE_IDLE); + if (unlikely(err)) + return -ENXIO; + + /* Clear the entry. */ + ep->version_type &= ~MSIQ_TYPE_MASK; + + (*head) += sizeof(struct pci_sun4v_msiq_entry); + if (*head >= + (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))) + *head = 0; + + return 1; +} + +static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long head) +{ + unsigned long err; + + err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head); + if (unlikely(err)) + return -EINVAL; + + return 0; +} + +static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, + unsigned long msi, int is_msi64) +{ + if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid, + (is_msi64 ? 
+ HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) + return -ENXIO; + if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE)) + return -ENXIO; + if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID)) + return -ENXIO; + return 0; +} + +static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) +{ + unsigned long err, msiqid; + + err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid); + if (err) + return -ENXIO; + + pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID); + + return 0; +} + +static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm) +{ + unsigned long q_size, alloc_size, pages, order; + int i; + + q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); + alloc_size = (pbm->msiq_num * q_size); + order = get_order(alloc_size); + pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); + if (pages == 0UL) { + printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n", + order); + return -ENOMEM; + } + memset((char *)pages, 0, PAGE_SIZE << order); + pbm->msi_queues = (void *) pages; + + for (i = 0; i < pbm->msiq_num; i++) { + unsigned long err, base = __pa(pages + (i * q_size)); + unsigned long ret1, ret2; + + err = pci_sun4v_msiq_conf(pbm->devhandle, + pbm->msiq_first + i, + base, pbm->msiq_ent_count); + if (err) { + printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n", + err); + goto h_error; + } + + err = pci_sun4v_msiq_info(pbm->devhandle, + pbm->msiq_first + i, + &ret1, &ret2); + if (err) { + printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n", + err); + goto h_error; + } + if (ret1 != base || ret2 != pbm->msiq_ent_count) { + printk(KERN_ERR "MSI: Bogus qconf " + "expected[%lx:%x] got[%lx:%lx]\n", + base, pbm->msiq_ent_count, + ret1, ret2); + goto h_error; + } + } + + return 0; + +h_error: + free_pages(pages, order); + return -EINVAL; +} + +static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm) +{ + unsigned long q_size, alloc_size, pages, order; + int i; + + for (i = 0; i < pbm->msiq_num; i++) { + unsigned long msiqid = pbm->msiq_first + i; + + (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0); + } + + q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); + alloc_size = (pbm->msiq_num * q_size); + order = get_order(alloc_size); + + pages = (unsigned long) pbm->msi_queues; + + free_pages(pages, order); + + pbm->msi_queues = NULL; +} + +static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, + unsigned long msiqid, + unsigned long devino) +{ + unsigned int irq = sun4v_build_irq(pbm->devhandle, devino); + + if (!irq) + return -ENOMEM; + + if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) + return -EINVAL; + if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) + return -EINVAL; + + return irq; +} + +static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = { + .get_head = pci_sun4v_get_head, + .dequeue_msi = pci_sun4v_dequeue_msi, + .set_head = pci_sun4v_set_head, + .msi_setup = pci_sun4v_msi_setup, + .msi_teardown = pci_sun4v_msi_teardown, + .msiq_alloc = pci_sun4v_msiq_alloc, + .msiq_free = pci_sun4v_msiq_free, + .msiq_build_irq = pci_sun4v_msiq_build_irq, +}; + +static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) +{ + sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops); +} +#else /* CONFIG_PCI_MSI */ +static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) +{ +} +#endif /* !(CONFIG_PCI_MSI) */ + +static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 devhandle) +{ + struct device_node *dp = op->dev.of_node; + 
int err; + + pbm->numa_node = of_node_to_nid(dp); + + pbm->pci_ops = &sun4v_pci_ops; + pbm->config_space_reg_bits = 12; + + pbm->index = pci_num_pbms++; + + pbm->op = op; + + pbm->devhandle = devhandle; + + pbm->name = dp->full_name; + + printk("%s: SUN4V PCI Bus Module\n", pbm->name); + printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node); + + pci_determine_mem_io_space(pbm); + + pci_get_pbm_props(pbm); + + err = pci_sun4v_iommu_init(pbm); + if (err) + return err; + + pci_sun4v_msi_init(pbm); + + pci_sun4v_scan_bus(pbm, &op->dev); + + /* If atu_init fails it is not a complete failure; + * we can still continue using the legacy iommu. + */ + if (pbm->iommu->atu) { + err = pci_sun4v_atu_init(pbm); + if (err) { + kfree(pbm->iommu->atu); + pbm->iommu->atu = NULL; + pr_err(PFX "ATU init failed, err=%d\n", err); + } + } + + pbm->next = pci_pbm_root; + pci_pbm_root = pbm; + + return 0; +} + +static int pci_sun4v_probe(struct platform_device *op) +{ + const struct linux_prom64_registers *regs; + static int hvapi_negotiated = 0; + struct pci_pbm_info *pbm; + struct device_node *dp; + struct iommu *iommu; + struct atu *atu; + u32 devhandle; + int i, err = -ENODEV; + static bool hv_atu = true; + + dp = op->dev.of_node; + + if (!hvapi_negotiated++) { + for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) { + vpci_major = vpci_versions[i].major; + vpci_minor = vpci_versions[i].minor; + + err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major, + &vpci_minor); + if (!err) + break; + } + + if (err) { + pr_err(PFX "Could not register hvapi, err=%d\n", err); + return err; + } + pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", + vpci_major, vpci_minor); + + err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); + if (err) { + /* Don't return an error if we fail to register the + * ATU group, but ATU hcalls won't be available.
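+ * We then clear hv_atu so no ATU is ever allocated and all + * DMA falls back to the legacy IOMMU window (a soft failure, + * per the comment in pci_sun4v_pbm_init() above).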
+ */ + hv_atu = false; + } else { + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", + vatu_major, vatu_minor); + } + + dma_ops = &sun4v_dma_ops; + } + + regs = of_get_property(dp, "reg", NULL); + err = -ENODEV; + if (!regs) { + printk(KERN_ERR PFX "Could not find config registers\n"); + goto out_err; + } + devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff; + + err = -ENOMEM; + if (!iommu_batch_initialized) { + for_each_possible_cpu(i) { + unsigned long page = get_zeroed_page(GFP_KERNEL); + + if (!page) + goto out_err; + + per_cpu(iommu_batch, i).pglist = (u64 *) page; + } + iommu_batch_initialized = 1; + } + + pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); + if (!pbm) { + printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n"); + goto out_err; + } + + iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); + if (!iommu) { + printk(KERN_ERR PFX "Could not allocate pbm iommu\n"); + goto out_free_controller; + } + + pbm->iommu = iommu; + iommu->atu = NULL; + if (hv_atu) { + atu = kzalloc(sizeof(*atu), GFP_KERNEL); + if (!atu) + pr_err(PFX "Could not allocate atu\n"); + else + iommu->atu = atu; + } + + err = pci_sun4v_pbm_init(pbm, op, devhandle); + if (err) + goto out_free_iommu; + + dev_set_drvdata(&op->dev, pbm); + + return 0; + +out_free_iommu: + kfree(iommu->atu); + kfree(pbm->iommu); + +out_free_controller: + kfree(pbm); + +out_err: + return err; +} + +static const struct of_device_id pci_sun4v_match[] = { + { + .name = "pci", + .compatible = "SUNW,sun4v-pci", + }, + {}, +}; + +static struct platform_driver pci_sun4v_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = pci_sun4v_match, + }, + .probe = pci_sun4v_probe, +}; + +static int __init pci_sun4v_init(void) +{ + return platform_driver_register(&pci_sun4v_driver); +} + +subsys_initcall(pci_sun4v_init); diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h new file mode 100644 index 000000000..d47263a99 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* pci_sun4v.h: SUN4V specific PCI controller support. + * + * Copyright (C) 2006 David S. 
Miller (davem@davemloft.net) + */ + +#ifndef _PCI_SUN4V_H +#define _PCI_SUN4V_H + +long pci_sun4v_iommu_map(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes, + unsigned long io_attributes, + unsigned long io_page_list_pa); +unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes); +unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, + unsigned long tsbid, + unsigned long *io_attributes, + unsigned long *real_address); +unsigned long pci_sun4v_config_get(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size); +int pci_sun4v_config_put(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size, + unsigned long data); + +unsigned long pci_sun4v_msiq_conf(unsigned long devhandle, + unsigned long msiqid, + unsigned long msiq_paddr, + unsigned long num_entries); +unsigned long pci_sun4v_msiq_info(unsigned long devhandle, + unsigned long msiqid, + unsigned long *msiq_paddr, + unsigned long *num_entries); +unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle, + unsigned long msiqid, + unsigned long *valid); +unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle, + unsigned long msiqid, + unsigned long valid); +unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle, + unsigned long msiqid, + unsigned long *state); +unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle, + unsigned long msiqid, + unsigned long state); +unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle, + unsigned long msiqid, + unsigned long *head); +unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle, + unsigned long msiqid, + unsigned long head); +unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle, + unsigned long msiqid, + unsigned long *head); +unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long *valid); +unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long valid); +unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long *msiq); +unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long msiq, + unsigned long msitype); +unsigned long pci_sun4v_msi_getstate(unsigned long devhandle, + unsigned long msinum, + unsigned long *state); +unsigned long pci_sun4v_msi_setstate(unsigned long devhandle, + unsigned long msinum, + unsigned long state); +unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long *msiq); +unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long msiq); +unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long *valid); +unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long valid); + +/* Sun4v HV IOMMU v2 APIs */ +unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, + unsigned long ra, + unsigned long table_size, + unsigned long page_size, + unsigned long dvma_base, + u64 *iotsb_num); +unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + unsigned int pci_device); +unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index_iottes, + unsigned long io_attributes, + unsigned long io_page_list_pa, + long *mapped); +unsigned 
long pci_sun4v_iotsb_demap(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index, + unsigned long iottes, + unsigned long *demapped); +#endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S new file mode 100644 index 000000000..2b8051871 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v_asm.S @@ -0,0 +1,431 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* pci_sun4v_asm: Hypervisor calls for PCI support. + * + * Copyright (C) 2006, 2008 David S. Miller + */ + +#include +#include + + /* %o0: devhandle + * %o1: tsbid + * %o2: num ttes + * %o3: io_attributes + * %o4: io_page_list phys address + * + * returns %o0: -status if status was non-zero, else + * %o0: num pages mapped + */ +ENTRY(pci_sun4v_iommu_map) + mov %o5, %g1 + mov HV_FAST_PCI_IOMMU_MAP, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, 1f + sub %g0, %o0, %o0 + mov %o1, %o0 +1: retl + nop +ENDPROC(pci_sun4v_iommu_map) + + /* %o0: devhandle + * %o1: tsbid + * %o2: num ttes + * + * returns %o0: num ttes demapped + */ +ENTRY(pci_sun4v_iommu_demap) + mov HV_FAST_PCI_IOMMU_DEMAP, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 +ENDPROC(pci_sun4v_iommu_demap) + + /* %o0: devhandle + * %o1: tsbid + * %o2: &io_attributes + * %o3: &real_address + * + * returns %o0: status + */ +ENTRY(pci_sun4v_iommu_getmap) + mov %o2, %o4 + mov HV_FAST_PCI_IOMMU_GETMAP, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + stx %o2, [%o3] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_iommu_getmap) + + /* %o0: devhandle + * %o1: pci_device + * %o2: pci_config_offset + * %o3: size + * + * returns %o0: data + * + * If there is an error, the data will be returned + * as all 1's. + */ +ENTRY(pci_sun4v_config_get) + mov HV_FAST_PCI_CONFIG_GET, %o5 + ta HV_FAST_TRAP + brnz,a,pn %o1, 1f + mov -1, %o2 +1: retl + mov %o2, %o0 +ENDPROC(pci_sun4v_config_get) + + /* %o0: devhandle + * %o1: pci_device + * %o2: pci_config_offset + * %o3: size + * %o4: data + * + * returns %o0: status + * + * status will be zero if the operation completed + * successfully, else -1 if not + */ +ENTRY(pci_sun4v_config_put) + mov HV_FAST_PCI_CONFIG_PUT, %o5 + ta HV_FAST_TRAP + brnz,a,pn %o1, 1f + mov -1, %o1 +1: retl + mov %o1, %o0 +ENDPROC(pci_sun4v_config_put) + + /* %o0: devhandle + * %o1: msiqid + * %o2: msiq phys address + * %o3: num entries + * + * returns %o0: status + * + * status will be zero if the operation completed + * successfully, else -1 if not + */ +ENTRY(pci_sun4v_msiq_conf) + mov HV_FAST_PCI_MSIQ_CONF, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_conf) + + /* %o0: devhandle + * %o1: msiqid + * %o2: &msiq_phys_addr + * %o3: &msiq_num_entries + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_info) + mov %o2, %o4 + mov HV_FAST_PCI_MSIQ_INFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + stx %o2, [%o3] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_info) + + /* %o0: devhandle + * %o1: msiqid + * %o2: &valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_getvalid) + mov HV_FAST_PCI_MSIQ_GETVALID, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_getvalid) + + /* %o0: devhandle + * %o1: msiqid + * %o2: valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_setvalid) + mov HV_FAST_PCI_MSIQ_SETVALID, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_setvalid) + + /* %o0: devhandle + * %o1: msiqid + * %o2: &state + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_getstate) + mov HV_FAST_PCI_MSIQ_GETSTATE, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + 
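/* All of these stubs share the sun4v fast-trap convention: the + * status comes back in %o0 and the value, if any, in %o1; the + * 'mov %o0, %o0' in the delay slot is a no-op that leaves the + * status as the return value. */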
retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_getstate) + + /* %o0: devhandle + * %o1: msiqid + * %o2: state + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_setstate) + mov HV_FAST_PCI_MSIQ_SETSTATE, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_setstate) + + /* %o0: devhandle + * %o1: msiqid + * %o2: &head + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_gethead) + mov HV_FAST_PCI_MSIQ_GETHEAD, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_gethead) + + /* %o0: devhandle + * %o1: msiqid + * %o2: head + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_sethead) + mov HV_FAST_PCI_MSIQ_SETHEAD, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_sethead) + + /* %o0: devhandle + * %o1: msiqid + * %o2: &tail + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msiq_gettail) + mov HV_FAST_PCI_MSIQ_GETTAIL, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msiq_gettail) + + /* %o0: devhandle + * %o1: msinum + * %o2: &valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_getvalid) + mov HV_FAST_PCI_MSI_GETVALID, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_getvalid) + + /* %o0: devhandle + * %o1: msinum + * %o2: valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_setvalid) + mov HV_FAST_PCI_MSI_SETVALID, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_setvalid) + + /* %o0: devhandle + * %o1: msinum + * %o2: &msiq + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_getmsiq) + mov HV_FAST_PCI_MSI_GETMSIQ, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_getmsiq) + + /* %o0: devhandle + * %o1: msinum + * %o2: msitype + * %o3: msiq + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_setmsiq) + mov HV_FAST_PCI_MSI_SETMSIQ, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_setmsiq) + + /* %o0: devhandle + * %o1: msinum + * %o2: &state + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_getstate) + mov HV_FAST_PCI_MSI_GETSTATE, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_getstate) + + /* %o0: devhandle + * %o1: msinum + * %o2: state + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msi_setstate) + mov HV_FAST_PCI_MSI_SETSTATE, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msi_setstate) + + /* %o0: devhandle + * %o1: msinum + * %o2: &msiq + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msg_getmsiq) + mov HV_FAST_PCI_MSG_GETMSIQ, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msg_getmsiq) + + /* %o0: devhandle + * %o1: msinum + * %o2: msiq + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msg_setmsiq) + mov HV_FAST_PCI_MSG_SETMSIQ, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msg_setmsiq) + + /* %o0: devhandle + * %o1: msinum + * %o2: &valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msg_getvalid) + mov HV_FAST_PCI_MSG_GETVALID, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msg_getvalid) + + /* %o0: devhandle + * %o1: msinum + * %o2: valid + * + * returns %o0: status + */ +ENTRY(pci_sun4v_msg_setvalid) + mov HV_FAST_PCI_MSG_SETVALID, %o5 + ta HV_FAST_TRAP + retl + mov %o0, %o0 +ENDPROC(pci_sun4v_msg_setvalid) + + /* + * %o0: devhandle + * %o1: r_addr + * %o2: size + * %o3: pagesize + * %o4: virt + * %o5: &iotsb_num/&iotsb_handle + * + * returns %o0: status + * %o1: iotsb_num/iotsb_handle + */ 
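+ /* %o5 must carry the fast-trap function number, so the result + * pointer passed in %o5 is parked in %g1 across the trap; the + * iotsb_num returned in %o1 is stored through it afterwards. + */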
+ENTRY(pci_sun4v_iotsb_conf) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_CONF, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_conf) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: pci_device + * + * returns %o0: status + */ +ENTRY(pci_sun4v_iotsb_bind) + mov HV_FAST_PCI_IOTSB_BIND, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(pci_sun4v_iotsb_bind) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: index_count + * %o3: iotte_attributes + * %o4: io_page_list_p + * %o5: &mapped + * + * returns %o0: status + * %o1: #mapped + */ +ENTRY(pci_sun4v_iotsb_map) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_MAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_map) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: iotsb_index + * %o3: #iottes + * %o4: &demapped + * + * returns %o0: status + * %o1: #demapped + */ +ENTRY(pci_sun4v_iotsb_demap) + mov HV_FAST_PCI_IOTSB_DEMAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(pci_sun4v_iotsb_demap) diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c new file mode 100644 index 000000000..ee4c9a9a1 --- /dev/null +++ b/arch/sparc/kernel/pcic.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pcic.c: MicroSPARC-IIep PCI controller support + * + * Copyright (C) 1998 V. Roganov and G. Raiko + * + * Code is derived from Ultra/PCI PSYCHO controller support, see that + * for author info. + * + * Support for diverse IIep based platforms by Pete Zaitcev. + * CP-1200 by Eric Brower. + */ + +#include +#include +#include +#include +#include +#include + +#include /* for cache flushing. */ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +/* + * I studied different documents and many live PROMs both from 2.30 + * family and 3.xx versions. I came to the amazing conclusion: there is + * absolutely no way to route interrupts in IIep systems relying on + * information which PROM presents. We must hardcode interrupt routing + * schematics. And this actually sucks. -- zaitcev 1999/05/12 + * + * To find irq for a device we determine which routing map + * is in effect or, in other words, on which machine we are running. + * We use PROM name for this although other techniques may be used + * in special cases (Gleb reports a PROMless IIep based system). + * Once we know the map we take device configuration address and + * find PCIC pin number where INT line goes. Then we may either program + * preferred irq into the PCIC or supply the preexisting irq to the device. + */ +struct pcic_ca2irq { + unsigned char busno; /* PCI bus number */ + unsigned char devfn; /* Configuration address */ + unsigned char pin; /* PCIC external interrupt pin */ + unsigned char irq; /* Preferred IRQ (mappable in PCIC) */ + unsigned int force; /* Enforce preferred IRQ */ +}; + +struct pcic_sn2list { + char *sysname; + struct pcic_ca2irq *intmap; + int mapdim; +}; + +/* + * JavaEngine-1 apparently has different versions. + * + * According to communications with Sun folks, for P2 build 501-4628-03: + * pin 0 - parallel, audio; + * pin 1 - Ethernet; + * pin 2 - su; + * pin 3 - PS/2 kbd and mouse. + * + * OEM manual (805-1486): + * pin 0: Ethernet + * pin 1: All EBus + * pin 2: IGA (unused) + * pin 3: Not connected + * OEM manual says that 501-4628 & 501-4811 are the same thing, + * only the latter has NAND flash in place. 
+ * + * So far unofficial Sun wins over the OEM manual. Poor OEMs... + */ +static struct pcic_ca2irq pcic_i_je1a[] = { /* 501-4811-03 */ + { 0, 0x00, 2, 12, 0 }, /* EBus: hogs all */ + { 0, 0x01, 1, 6, 1 }, /* Happy Meal */ + { 0, 0x80, 0, 7, 0 }, /* IGA (unused) */ +}; + +/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */ +static struct pcic_ca2irq pcic_i_jse[] = { + { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ + { 0, 0x01, 1, 6, 0 }, /* hme */ + { 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */ + { 0, 0x10, 6, 8, 0 }, /* PCI INTA# in Slot 1 */ + { 0, 0x18, 7, 12, 0 }, /* PCI INTA# in Slot 2, shared w. RTC */ + { 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */ + { 0, 0x80, 5, 11, 0 }, /* EIDE */ + /* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */ + { 0, 0xA0, 4, 9, 0 }, /* USB */ + /* + * Some pins belong to non-PCI devices, we hardcode them in drivers. + * sun4m timers - irq 10, 14 + * PC style RTC - pin 7, irq 4 ? + * Smart card, Parallel - pin 4 shared with USB, ISA + * audio - pin 3, irq 5 ? + */ +}; + +/* SPARCengine-6 was the original release name of CP1200. + * The documentation differs between the two versions + */ +static struct pcic_ca2irq pcic_i_se6[] = { + { 0, 0x08, 0, 2, 0 }, /* SCSI */ + { 0, 0x01, 1, 6, 0 }, /* HME */ + { 0, 0x00, 3, 13, 0 }, /* EBus */ +}; + +/* + * Krups (courtesy of Varol Kaptan) + * No documentation available, but it was easy to guess + * because it was very similar to Espresso. + * + * pin 0 - kbd, mouse, serial; + * pin 1 - Ethernet; + * pin 2 - igs (we do not use it); + * pin 3 - audio; + * pin 4,5,6 - unused; + * pin 7 - RTC (from P2 onwards as David B. says). + */ +static struct pcic_ca2irq pcic_i_jk[] = { + { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ + { 0, 0x01, 1, 6, 0 }, /* hme */ +}; + +/* + * Several entries in this list may point to the same routing map + * as several PROMs may be installed on the same physical board. + */ +#define SN2L_INIT(name, map) \ + { name, map, ARRAY_SIZE(map) } + +static struct pcic_sn2list pcic_known_sysnames[] = { + SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a), /* JE1, PROM 2.32 */ + SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */ + SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */ + SN2L_INIT("SUNW,JS-NC", pcic_i_jk), /* PROLL JavaStation-NC */ + SN2L_INIT("SUNW,JSIIep", pcic_i_jk), /* OBP JavaStation-NC */ + { NULL, NULL, 0 } +}; + +/* + * Only one PCIC per IIep, + * and since we have no SMP IIep, only one per system. 
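+ * Hence the single static instance (pcic0) below.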
+ */ +static int pcic0_up; +static struct linux_pcic pcic0; + +void __iomem *pcic_regs; +static volatile int pcic_speculative; +static volatile int pcic_trapped; + +/* forward */ +unsigned int pcic_build_device_irq(struct platform_device *op, + unsigned int real_irq); + +#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) + +static int pcic_read_config_dword(unsigned int busno, unsigned int devfn, + int where, u32 *value) +{ + struct linux_pcic *pcic; + unsigned long flags; + + pcic = &pcic0; + + local_irq_save(flags); +#if 0 /* does not fail here */ + pcic_speculative = 1; + pcic_trapped = 0; +#endif + writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); +#if 0 /* does not fail here */ + nop(); + if (pcic_trapped) { + local_irq_restore(flags); + *value = ~0; + return 0; + } +#endif + pcic_speculative = 2; + pcic_trapped = 0; + *value = readl(pcic->pcic_config_space_data + (where&4)); + nop(); + if (pcic_trapped) { + pcic_speculative = 0; + local_irq_restore(flags); + *value = ~0; + return 0; + } + pcic_speculative = 0; + local_irq_restore(flags); + return 0; +} + +static int pcic_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + unsigned int v; + + if (bus->number != 0) return -EINVAL; + switch (size) { + case 1: + pcic_read_config_dword(bus->number, devfn, where&~3, &v); + *val = 0xff & (v >> (8*(where & 3))); + return 0; + case 2: + if (where&1) return -EINVAL; + pcic_read_config_dword(bus->number, devfn, where&~3, &v); + *val = 0xffff & (v >> (8*(where & 3))); + return 0; + case 4: + if (where&3) return -EINVAL; + pcic_read_config_dword(bus->number, devfn, where&~3, val); + return 0; + } + return -EINVAL; +} + +static int pcic_write_config_dword(unsigned int busno, unsigned int devfn, + int where, u32 value) +{ + struct linux_pcic *pcic; + unsigned long flags; + + pcic = &pcic0; + + local_irq_save(flags); + writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); + writel(value, pcic->pcic_config_space_data + (where&4)); + local_irq_restore(flags); + return 0; +} + +static int pcic_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + unsigned int v; + + if (bus->number != 0) return -EINVAL; + switch (size) { + case 1: + pcic_read_config_dword(bus->number, devfn, where&~3, &v); + v = (v & ~(0xff << (8*(where&3)))) | + ((0xff&val) << (8*(where&3))); + return pcic_write_config_dword(bus->number, devfn, where&~3, v); + case 2: + if (where&1) return -EINVAL; + pcic_read_config_dword(bus->number, devfn, where&~3, &v); + v = (v & ~(0xffff << (8*(where&3)))) | + ((0xffff&val) << (8*(where&3))); + return pcic_write_config_dword(bus->number, devfn, where&~3, v); + case 4: + if (where&3) return -EINVAL; + return pcic_write_config_dword(bus->number, devfn, where, val); + } + return -EINVAL; +} + +static struct pci_ops pcic_ops = { + .read = pcic_read_config, + .write = pcic_write_config, +}; + +/* + * On sparc64 pcibios_init() calls pci_controller_probe(). + * We want PCIC probed little ahead so that interrupt controller + * would be operational. 
+ */ +int __init pcic_probe(void) +{ + struct linux_pcic *pcic; + struct linux_prom_registers regs[PROMREG_MAX]; + struct linux_pbm_info* pbm; + char namebuf[64]; + phandle node; + int err; + + if (pcic0_up) { + prom_printf("PCIC: called twice!\n"); + prom_halt(); + } + pcic = &pcic0; + + node = prom_getchild (prom_root_node); + node = prom_searchsiblings (node, "pci"); + if (node == 0) + return -ENODEV; + /* + * Map in PCIC register set, config space, and IO base + */ + err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs)); + if (err == 0 || err == -1) { + prom_printf("PCIC: Error, cannot get PCIC registers " + "from PROM.\n"); + prom_halt(); + } + + pcic0_up = 1; + + pcic->pcic_res_regs.name = "pcic_registers"; + pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size); + if (!pcic->pcic_regs) { + prom_printf("PCIC: Error, cannot map PCIC registers.\n"); + prom_halt(); + } + + pcic->pcic_res_io.name = "pcic_io"; + if ((pcic->pcic_io = (unsigned long) + ioremap(regs[1].phys_addr, 0x10000)) == 0) { + prom_printf("PCIC: Error, cannot map PCIC IO Base.\n"); + prom_halt(); + } + + pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; + if ((pcic->pcic_config_space_addr = + ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) { + prom_printf("PCIC: Error, cannot map " + "PCI Configuration Space Address.\n"); + prom_halt(); + } + + /* + * Docs say three least significant bits in address and data + * must be the same. Thus, we need adjust size of data. + */ + pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; + if ((pcic->pcic_config_space_data = + ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) { + prom_printf("PCIC: Error, cannot map " + "PCI Configuration Space Data.\n"); + prom_halt(); + } + + pbm = &pcic->pbm; + pbm->prom_node = node; + prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; + strcpy(pbm->prom_name, namebuf); + + { + extern int pcic_nmi_trap_patch[4]; + + t_nmi[0] = pcic_nmi_trap_patch[0]; + t_nmi[1] = pcic_nmi_trap_patch[1]; + t_nmi[2] = pcic_nmi_trap_patch[2]; + t_nmi[3] = pcic_nmi_trap_patch[3]; + swift_flush_dcache(); + pcic_regs = pcic->pcic_regs; + } + + prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0; + { + struct pcic_sn2list *p; + + for (p = pcic_known_sysnames; p->sysname != NULL; p++) { + if (strcmp(namebuf, p->sysname) == 0) + break; + } + pcic->pcic_imap = p->intmap; + pcic->pcic_imdim = p->mapdim; + } + if (pcic->pcic_imap == NULL) { + /* + * We do not panic here for the sake of embedded systems. + */ + printk("PCIC: System %s is unknown, cannot route interrupts\n", + namebuf); + } + + return 0; +} + +static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic) +{ + struct linux_pbm_info *pbm = &pcic->pbm; + + pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm); + if (!pbm->pci_bus) + return; + +#if 0 /* deadwood transplanted from sparc64 */ + pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); + pci_record_assignments(pbm, pbm->pci_bus); + pci_assign_unassigned(pbm, pbm->pci_bus); + pci_fixup_irq(pbm, pbm->pci_bus); +#endif + pci_bus_add_devices(pbm->pci_bus); +} + +/* + * Main entry point from the PCI subsystem. + */ +static int __init pcic_init(void) +{ + struct linux_pcic *pcic; + + /* + * PCIC should be initialized at start of the timer. + * So, here we report the presence of PCIC and do some magic passes. + */ + if(!pcic0_up) + return 0; + pcic = &pcic0; + + /* + * Switch off IOTLB translation. 
+ */ + writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE, + pcic->pcic_regs+PCI_DVMA_CONTROL); + + /* + * Increase mapped size for PCI memory space (DMA access). + * Should be done in that order (size first, address second). + * Why we couldn't set up 4GB and forget about it? XXX + */ + writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0); + writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY, + pcic->pcic_regs+PCI_BASE_ADDRESS_0); + + pcic_pbm_scan_bus(pcic); + + return 0; +} + +int pcic_present(void) +{ + return pcic0_up; +} + +static int pdev_to_pnode(struct linux_pbm_info *pbm, struct pci_dev *pdev) +{ + struct linux_prom_pci_registers regs[PROMREG_MAX]; + int err; + phandle node = prom_getchild(pbm->prom_node); + + while(node) { + err = prom_getproperty(node, "reg", + (char *)®s[0], sizeof(regs)); + if(err != 0 && err != -1) { + unsigned long devfn = (regs[0].which_io >> 8) & 0xff; + if(devfn == pdev->devfn) + return node; + } + node = prom_getsibling(node); + } + return 0; +} + +static inline struct pcidev_cookie *pci_devcookie_alloc(void) +{ + return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC); +} + +static void pcic_map_pci_device(struct linux_pcic *pcic, + struct pci_dev *dev, int node) +{ + char namebuf[64]; + unsigned long address; + unsigned long flags; + int j; + + if (node == 0 || node == -1) { + strcpy(namebuf, "???"); + } else { + prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; + } + + for (j = 0; j < 6; j++) { + address = dev->resource[j].start; + if (address == 0) break; /* are sequential */ + flags = dev->resource[j].flags; + if ((flags & IORESOURCE_IO) != 0) { + if (address < 0x10000) { + /* + * A device responds to I/O cycles on PCI. + * We generate these cycles with memory + * access into the fixed map (phys 0x30000000). + * + * Since a device driver does not want to + * do ioremap() before accessing PC-style I/O, + * we supply virtual, ready to access address. + * + * Note that request_region() + * works for these devices. + * + * XXX Neat trick, but it's a *bad* idea + * to shit into regions like that. + * What if we want to allocate one more + * PCI base address... + */ + dev->resource[j].start = + pcic->pcic_io + address; + dev->resource[j].end = 1; /* XXX */ + dev->resource[j].flags = + (flags & ~IORESOURCE_IO) | IORESOURCE_MEM; + } else { + /* + * OOPS... PCI Spec allows this. Sun does + * not have any devices getting above 64K + * so it must be user with a weird I/O + * board in a PCI slot. We must remap it + * under 64K but it is not done yet. 
XXX + */ + pci_info(dev, "PCIC: Skipping I/O space at " + "0x%lx, this will Oops if a driver " + "attaches device '%s'\n", address, + namebuf); + } + } + } +} + +static void +pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) +{ + struct pcic_ca2irq *p; + unsigned int real_irq; + int i, ivec; + char namebuf[64]; + + if (node == 0 || node == -1) { + strcpy(namebuf, "???"); + } else { + prom_getstring(node, "name", namebuf, sizeof(namebuf)); + } + + if ((p = pcic->pcic_imap) == NULL) { + dev->irq = 0; + return; + } + for (i = 0; i < pcic->pcic_imdim; i++) { + if (p->busno == dev->bus->number && p->devfn == dev->devfn) + break; + p++; + } + if (i >= pcic->pcic_imdim) { + pci_info(dev, "PCIC: device %s not found in %d\n", namebuf, + pcic->pcic_imdim); + dev->irq = 0; + return; + } + + i = p->pin; + if (i >= 0 && i < 4) { + ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); + real_irq = ivec >> (i << 2) & 0xF; + } else if (i >= 4 && i < 8) { + ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); + real_irq = ivec >> ((i-4) << 2) & 0xF; + } else { /* Corrupted map */ + pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {} + } +/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ + + /* real_irq means PROM did not bother to program the upper + * half of PCIC. This happens on JS-E with PROM 3.11, for instance. + */ + if (real_irq == 0 || p->force) { + if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ + pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {} + } + pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq, + p->pin); + real_irq = p->irq; + + i = p->pin; + if (i >= 4) { + ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); + ivec &= ~(0xF << ((i - 4) << 2)); + ivec |= p->irq << ((i - 4) << 2); + writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI); + } else { + ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); + ivec &= ~(0xF << (i << 2)); + ivec |= p->irq << (i << 2); + writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); + } + } + dev->irq = pcic_build_device_irq(NULL, real_irq); +} + +/* + * Normally called from {do_}pci_scan_bus... 
+ */ +void pcibios_fixup_bus(struct pci_bus *bus) +{ + struct pci_dev *dev; + struct linux_pcic *pcic; + /* struct linux_pbm_info* pbm = &pcic->pbm; */ + int node; + struct pcidev_cookie *pcp; + + if (!pcic0_up) { + pci_info(bus, "pcibios_fixup_bus: no PCIC\n"); + return; + } + pcic = &pcic0; + + /* + * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); + */ + if (bus->number != 0) { + pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n", + bus->number); + return; + } + + list_for_each_entry(dev, &bus->devices, bus_list) { + node = pdev_to_pnode(&pcic->pbm, dev); + if(node == 0) + node = -1; + + /* cookies */ + pcp = pci_devcookie_alloc(); + pcp->pbm = &pcic->pbm; + pcp->prom_node = of_find_node_by_phandle(node); + dev->sysdata = pcp; + + /* fixing I/O to look like memory */ + if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE) + pcic_map_pci_device(pcic, dev, node); + + pcic_fill_irq(pcic, dev, node); + } +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<<i))) + continue; + + if (res->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +/* Makes compiler happy */ +static volatile int pcic_timer_dummy; + +static void pcic_clear_clock_irq(void) +{ + pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT); +} + +/* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */ +#define USECS_PER_JIFFY (1000000 / HZ) +#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ) + +static unsigned int pcic_cycles_offset(void) +{ + u32 value, count; + + value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER); + count = value & ~PCI_SYS_COUNTER_OVERFLOW; + + if (value & PCI_SYS_COUNTER_OVERFLOW) + count += TICK_TIMER_LIMIT; + /* + * We divide all by HZ + * to have microsecond resolution and to avoid overflow + */ + count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ); + + /* Coordinate with the sparc_config.clock_rate setting */ + return count * 2; +} + +void __init pci_time_init(void) +{ + struct linux_pcic *pcic = &pcic0; + unsigned long v; + int timer_irq, irq; + int err; + +#ifndef CONFIG_SMP + /* + * The clock_rate is in SBUS dimension.
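+ * (For example, with HZ = 100 the 25 MHz counter above gives + * TICK_TIMER_LIMIT = 250000 ticks per 10 ms jiffy.)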
+ * We take into account this in pcic_cycles_offset() + */ + sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ; + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sparc_config.get_cycles_offset = pcic_cycles_offset; + + writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); + /* PROM should set appropriate irq */ + v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ); + timer_irq = PCI_COUNTER_IRQ_SYS(v); + writel (PCI_COUNTER_IRQ_SET(timer_irq, 0), + pcic->pcic_regs+PCI_COUNTER_IRQ); + irq = pcic_build_device_irq(NULL, timer_irq); + err = request_irq(irq, timer_interrupt, + IRQF_TIMER, "timer", NULL); + if (err) { + prom_printf("time_init: unable to attach IRQ%d\n", timer_irq); + prom_halt(); + } + local_irq_enable(); +} + + +#if 0 +static void watchdog_reset() { + writeb(0, pcic->pcic_regs+PCI_SYS_STATUS); +} +#endif + +/* + * NMI + */ +void pcic_nmi(unsigned int pend, struct pt_regs *regs) +{ + pend = swab32(pend); + + if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { + /* + * XXX On CP-1200 PCI #SERR may happen, we do not know + * what to do about it yet. + */ + printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n", + pend, (int)regs->pc, pcic_speculative); + for (;;) { } + } + pcic_speculative = 0; + pcic_trapped = 1; + regs->pc = regs->npc; + regs->npc += 4; +} + +static inline unsigned long get_irqmask(int irq_nr) +{ + return 1 << irq_nr; +} + +static void pcic_mask_irq(struct irq_data *data) +{ + unsigned long mask, flags; + + mask = (unsigned long)data->chip_data; + local_irq_save(flags); + writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); + local_irq_restore(flags); +} + +static void pcic_unmask_irq(struct irq_data *data) +{ + unsigned long mask, flags; + + mask = (unsigned long)data->chip_data; + local_irq_save(flags); + writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); + local_irq_restore(flags); +} + +static unsigned int pcic_startup_irq(struct irq_data *data) +{ + irq_link(data->irq); + pcic_unmask_irq(data); + return 0; +} + +static struct irq_chip pcic_irq = { + .name = "pcic", + .irq_startup = pcic_startup_irq, + .irq_mask = pcic_mask_irq, + .irq_unmask = pcic_unmask_irq, +}; + +unsigned int pcic_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + unsigned int irq; + unsigned long mask; + + irq = 0; + mask = get_irqmask(real_irq); + if (mask == 0) + goto out; + + irq = irq_alloc(real_irq, real_irq); + if (irq == 0) + goto out; + + irq_set_chip_and_handler_name(irq, &pcic_irq, + handle_level_irq, "PCIC"); + irq_set_chip_data(irq, (void *)mask); + +out: + return irq; +} + + +static void pcic_load_profile_irq(int cpu, unsigned int limit) +{ + printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); +} + +void __init sun4m_pci_init_IRQ(void) +{ + sparc_config.build_device_irq = pcic_build_device_irq; + sparc_config.clear_clock_irq = pcic_clear_clock_irq; + sparc_config.load_profile_irq = pcic_load_profile_irq; +} + +subsys_initcall(pcic_init); diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c new file mode 100644 index 000000000..2a12c86af --- /dev/null +++ b/arch/sparc/kernel/pcr.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* pcr.c: Generic sparc64 performance counter infrastructure. + * + * Copyright (C) 2009 David S. 
Miller (davem@davemloft.net) + */ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +/* This code is shared between various users of the performance + * counters. Users will be oprofile, pseudo-NMI watchdog, and the + * perf_event support layer. + */ + +/* Performance counter interrupts run unmasked at PIL level 15. + * Therefore we can't do things like wakeups and other work + * that expects IRQ disabling to be adhered to in locking etc. + * + * Therefore in such situations we defer the work by signalling + * a lower level cpu IRQ. + */ +void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + clear_softint(1 << PIL_DEFERRED_PCR_WORK); + + old_regs = set_irq_regs(regs); + irq_enter(); +#ifdef CONFIG_IRQ_WORK + irq_work_run(); +#endif + irq_exit(); + set_irq_regs(old_regs); +} + +void arch_irq_work_raise(void) +{ + set_softint(1 << PIL_DEFERRED_PCR_WORK); +} + +const struct pcr_ops *pcr_ops; +EXPORT_SYMBOL_GPL(pcr_ops); + +static u64 direct_pcr_read(unsigned long reg_num) +{ + u64 val; + + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("rd %%pcr, %0" : "=r" (val)); + return val; +} + +static void direct_pcr_write(unsigned long reg_num, u64 val) +{ + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val)); +} + +static u64 direct_pic_read(unsigned long reg_num) +{ + u64 val; + + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("rd %%pic, %0" : "=r" (val)); + return val; +} + +static void direct_pic_write(unsigned long reg_num, u64 val) +{ + WARN_ON_ONCE(reg_num != 0); + + /* Blackbird errata workaround. See commentary in + * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt() + * for more information. 
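+ * In short: branch to a 64-byte aligned instruction group, do the + * %pic write there, and immediately read %pic back, which is what + * the inline asm below does.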
+ */ + __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" + " nop\n\t" + ".align 64\n" + "99:wr %0, 0x0, %%pic\n\t" + "rd %%pic, %%g0" : : "r" (val)); +} + +static u64 direct_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / nmi_hz; + + return ((u64)((0 - delta) & 0xffffffff)) << 32; +} + +static const struct pcr_ops direct_pcr_ops = { + .read_pcr = direct_pcr_read, + .write_pcr = direct_pcr_write, + .read_pic = direct_pic_read, + .write_pic = direct_pic_write, + .nmi_picl_value = direct_picl_value, + .pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE), + .pcr_nmi_disable = PCR_PIC_PRIV, +}; + +static void n2_pcr_write(unsigned long reg_num, u64 val) +{ + unsigned long ret; + + WARN_ON_ONCE(reg_num != 0); + if (val & PCR_N2_HTRACE) { + ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); + if (ret != HV_EOK) + direct_pcr_write(reg_num, val); + } else + direct_pcr_write(reg_num, val); +} + +static u64 n2_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2); + + return ((u64)((0 - delta) & 0xffffffff)) << 32; +} + +static const struct pcr_ops n2_pcr_ops = { + .read_pcr = direct_pcr_read, + .write_pcr = n2_pcr_write, + .read_pic = direct_pic_read, + .write_pic = direct_pic_write, + .nmi_picl_value = n2_picl_value, + .pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | + PCR_N2_TOE_OV1 | + (2 << PCR_N2_SL1_SHIFT) | + (0xff << PCR_N2_MASK1_SHIFT)), + .pcr_nmi_disable = PCR_PIC_PRIV, +}; + +static u64 n4_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_vt_get_perfreg(reg_num, &val); + + return val; +} + +static void n4_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_vt_set_perfreg(reg_num, val); +} + +static u64 n4_pic_read(unsigned long reg_num) +{ + unsigned long val; + + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=r" (val) + : "r" (reg_num * 0x8UL), "i" (ASI_PIC)); + + return val; +} + +static void n4_pic_write(unsigned long reg_num, u64 val) +{ + __asm__ __volatile__("stxa %0, [%1] %2" + : /* no outputs */ + : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC)); +} + +static u64 n4_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2); + + return ((u64)((0 - delta) & 0xffffffff)); +} + +static const struct pcr_ops n4_pcr_ops = { + .read_pcr = n4_pcr_read, + .write_pcr = n4_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; + +static u64 n5_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_t5_get_perfreg(reg_num, &val); + + return val; +} + +static void n5_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_t5_set_perfreg(reg_num, val); +} + +static const struct pcr_ops n5_pcr_ops = { + .read_pcr = n5_pcr_read, + .write_pcr = n5_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; + +static u64 m7_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_m7_get_perfreg(reg_num, &val); + + return val; +} + +static void m7_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_m7_set_perfreg(reg_num, val); +} + +static const struct pcr_ops m7_pcr_ops = { + .read_pcr = m7_pcr_read, + .write_pcr = 
m7_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; + +static unsigned long perf_hsvc_group; +static unsigned long perf_hsvc_major; +static unsigned long perf_hsvc_minor; + +static int __init register_perf_hsvc(void) +{ + unsigned long hverror; + + if (tlb_type == hypervisor) { + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + perf_hsvc_group = HV_GRP_NIAG_PERF; + break; + + case SUN4V_CHIP_NIAGARA2: + perf_hsvc_group = HV_GRP_N2_CPU; + break; + + case SUN4V_CHIP_NIAGARA3: + perf_hsvc_group = HV_GRP_KT_CPU; + break; + + case SUN4V_CHIP_NIAGARA4: + perf_hsvc_group = HV_GRP_VT_CPU; + break; + + case SUN4V_CHIP_NIAGARA5: + perf_hsvc_group = HV_GRP_T5_CPU; + break; + + case SUN4V_CHIP_SPARC_M7: + perf_hsvc_group = HV_GRP_M7_PERF; + break; + + default: + return -ENODEV; + } + + + perf_hsvc_major = 1; + perf_hsvc_minor = 0; + hverror = sun4v_hvapi_register(perf_hsvc_group, + perf_hsvc_major, + &perf_hsvc_minor); + if (hverror) { + pr_err("perfmon: Could not register hvapi(0x%lx).\n", + hverror); + return -ENODEV; + } + } + return 0; +} + +static void __init unregister_perf_hsvc(void) +{ + if (tlb_type != hypervisor) + return; + sun4v_hvapi_unregister(perf_hsvc_group); +} + +static int __init setup_sun4v_pcr_ops(void) +{ + int ret = 0; + + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + case SUN4V_CHIP_NIAGARA2: + case SUN4V_CHIP_NIAGARA3: + pcr_ops = &n2_pcr_ops; + break; + + case SUN4V_CHIP_NIAGARA4: + pcr_ops = &n4_pcr_ops; + break; + + case SUN4V_CHIP_NIAGARA5: + pcr_ops = &n5_pcr_ops; + break; + + case SUN4V_CHIP_SPARC_M7: + pcr_ops = &m7_pcr_ops; + break; + + default: + ret = -ENODEV; + break; + } + + return ret; +} + +int __init pcr_arch_init(void) +{ + int err = register_perf_hsvc(); + + if (err) + return err; + + switch (tlb_type) { + case hypervisor: + err = setup_sun4v_pcr_ops(); + if (err) + goto out_unregister; + break; + + case cheetah: + case cheetah_plus: + pcr_ops = &direct_pcr_ops; + break; + + case spitfire: + /* UltraSPARC-I/II and derivatives lack a profile + * counter overflow interrupt so we can't make use of + * their hardware currently. + */ + fallthrough; + default: + err = -ENODEV; + goto out_unregister; + } + + return nmi_init(); + +out_unregister: + unregister_perf_hsvc(); + return err; +} diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c new file mode 100644 index 000000000..a58ae9c42 --- /dev/null +++ b/arch/sparc/kernel/perf_event.c @@ -0,0 +1,1877 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Performance event support for sparc64. + * + * Copyright (C) 2009, 2010 David S. Miller + * + * This code is based almost entirely upon the x86 perf event + * code, which is: + * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "kstack.h" + +/* Two classes of sparc64 chips currently exist. All of which have + * 32-bit counters which can generate overflow interrupts on the + * transition from 0xffffffff to 0. 
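+ * + * A sketch of what that implies for period programming: for a + * requested sample period P the 32-bit counter is seeded with + * (0 - P) & 0xffffffff, so that it overflows, and interrupts, + * after P more events.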
+ *
+ * All chips up to and including SPARC-T3 have two performance
+ * counters.  The two 32-bit counters are accessed in one go using a
+ * single 64-bit register.
+ *
+ * On these older chips both counters are controlled using a single
+ * control register.  The only way to stop all sampling is to clear
+ * all of the context (user, supervisor, hypervisor) sampling enable
+ * bits.  But these bits apply to both counters, thus the two counters
+ * can't be enabled/disabled individually.
+ *
+ * Furthermore, the control register on these older chips has two
+ * event fields, one for each of the two counters.  It's thus nearly
+ * impossible to have one counter going while keeping the other one
+ * stopped.  Therefore it is possible to get overflow interrupts for
+ * counters not currently "in use" and that condition must be checked
+ * in the overflow interrupt handler.
+ *
+ * So we use a hack, in that we program inactive counters with the
+ * "sw_count0" and "sw_count1" events.  These count how many times
+ * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
+ * unusual way to encode a NOP and therefore will not trigger in
+ * normal code.
+ *
+ * Starting with SPARC-T4 we have one control register per counter.
+ * And the counters are stored in individual registers.  The registers
+ * for the counters are 64-bit but only a 32-bit counter is
+ * implemented.  The event selections on SPARC-T4 lack any
+ * restrictions, therefore we can elide all of the complicated
+ * conflict resolution code we have for SPARC-T3 and earlier chips.
+ */
+
+#define MAX_HWEVENTS			4
+#define MAX_PCRS			4
+#define MAX_PERIOD			((1UL << 32) - 1)
+
+#define PIC_UPPER_INDEX			0
+#define PIC_LOWER_INDEX			1
+#define PIC_NO_INDEX			-1
+
+struct cpu_hw_events {
+	/* Number of events currently scheduled onto this cpu.
+	 * This tells how many entries in the arrays below
+	 * are valid.
+	 */
+	int			n_events;
+
+	/* Number of new events added since the last hw_perf_disable().
+	 * This works because the perf event layer always adds new
+	 * events inside of a perf_{disable,enable}() sequence.
+	 */
+	int			n_added;
+
+	/* Array of events currently scheduled on this cpu. */
+	struct perf_event	*event[MAX_HWEVENTS];
+
+	/* Array of encoded longs, specifying the %pcr register
+	 * encoding and the mask of PIC counters this event can
+	 * be scheduled on.  See perf_event_encode() et al.
+	 */
+	unsigned long		events[MAX_HWEVENTS];
+
+	/* The current counter index assigned to an event.  When the
+	 * event hasn't been programmed into the cpu yet, this will
+	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
+	 * we ought to schedule the event.
+	 */
+	int			current_idx[MAX_HWEVENTS];
+
+	/* Software copy of %pcr register(s) on this cpu. */
+	u64			pcr[MAX_HWEVENTS];
+
+	/* Enabled/disabled state. */
+	int			enabled;
+
+	unsigned int		txn_flags;
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+
+/* An event map describes the characteristics of a performance
+ * counter event.  In particular it gives the encoding as well as
+ * a mask telling which counters the event can be measured on.
+ *
+ * The mask is unused on SPARC-T4 and later.
+ */
+struct perf_event_map {
+	u16	encoding;
+	u8	pic_mask;
+#define PIC_NONE	0x00
+#define PIC_UPPER	0x01
+#define PIC_LOWER	0x02
+};
+
+/* Encode a perf_event_map entry into a long.
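+ *
+ * The layout is simply (encoding << 16) | pic_mask; e.g. the ultra3
+ * L1D read-miss event { 0x09, PIC_UPPER } encodes to
+ * (0x0009 << 16) | 0x01 == 0x00090001.  perf_event_get_enc() and
+ * perf_event_get_msk() below recover the two halves.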
*/ +static unsigned long perf_event_encode(const struct perf_event_map *pmap) +{ + return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; +} + +static u8 perf_event_get_msk(unsigned long val) +{ + return val & 0xff; +} + +static u64 perf_event_get_enc(unsigned long val) +{ + return val >> 16; +} + +#define C(x) PERF_COUNT_HW_CACHE_##x + +#define CACHE_OP_UNSUPPORTED 0xfffe +#define CACHE_OP_NONSENSE 0xffff + +typedef struct perf_event_map cache_map_t + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +struct sparc_pmu { + const struct perf_event_map *(*event_map)(int); + const cache_map_t *cache_map; + int max_events; + u32 (*read_pmc)(int); + void (*write_pmc)(int, u64); + int upper_shift; + int lower_shift; + int event_mask; + int user_bit; + int priv_bit; + int hv_bit; + int irq_bit; + int upper_nop; + int lower_nop; + unsigned int flags; +#define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001 +#define SPARC_PMU_HAS_CONFLICTS 0x00000002 + int max_hw_events; + int num_pcrs; + int num_pic_regs; +}; + +static u32 sparc_default_read_pmc(int idx) +{ + u64 val; + + val = pcr_ops->read_pic(0); + if (idx == PIC_UPPER_INDEX) + val >>= 32; + + return val & 0xffffffff; +} + +static void sparc_default_write_pmc(int idx, u64 val) +{ + u64 shift, mask, pic; + + shift = 0; + if (idx == PIC_UPPER_INDEX) + shift = 32; + + mask = ((u64) 0xffffffff) << shift; + val <<= shift; + + pic = pcr_ops->read_pic(0); + pic &= ~mask; + pic |= val; + pcr_ops->write_pic(0, pic); +} + +static const struct perf_event_map ultra3_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, +}; + +static const struct perf_event_map *ultra3_event_map(int event_id) +{ + return &ultra3_perfmon_event_map[event_id]; +} + +static const cache_map_t ultra3_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { 
+ [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu ultra3_pmu = { + .event_map = ultra3_event_map, + .cache_map = &ultra3_cache_map, + .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, + .upper_shift = 11, + .lower_shift = 4, + .event_mask = 0x3f, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, + .upper_nop = 0x1c, + .lower_nop = 0x14, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, +}; + +/* Niagara1 is very limited. The upper PIC is hard-locked to count + * only instructions, so it is free running which creates all kinds of + * problems. Some hardware designs make one wonder if the creator + * even looked at how this stuff gets used by software. 
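+ *
+ * Concretely, in the map below HW_CPU_CYCLES and HW_INSTRUCTIONS
+ * both map to encoding 0x00 on the upper PIC, while
+ * HW_CACHE_REFERENCES cannot be counted at all (PIC_NONE).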
+ */ +static const struct perf_event_map niagara1_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, +}; + +static const struct perf_event_map *niagara1_event_map(int event_id) +{ + return &niagara1_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara1_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, + [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu niagara1_pmu = { + .event_map = niagara1_event_map, + .cache_map = &niagara1_cache_map, + .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, + .upper_shift = 0, + .lower_shift = 4, + .event_mask = 0x7, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, + 
.upper_nop = 0x0, + .lower_nop = 0x0, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, +}; + +static const struct perf_event_map niagara2_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, +}; + +static const struct perf_event_map *niagara2_event_map(int event_id) +{ + return &niagara2_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara2_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { 
CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu niagara2_pmu = { + .event_map = niagara2_event_map, + .cache_map = &niagara2_cache_map, + .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, + .upper_shift = 19, + .lower_shift = 6, + .event_mask = 0xfff, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, + .hv_bit = PCR_N2_HTRACE, + .irq_bit = 0x30, + .upper_nop = 0x220, + .lower_nop = 0x220, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, +}; + +static const struct perf_event_map niagara4_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) }, + [PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 }, + [PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 }, + [PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f }, +}; + +static const struct perf_event_map *niagara4_event_map(int event_id) +{ + return &niagara4_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara4_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 }, + [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, + [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x3f }, + [C(RESULT_MISS)] = { (11 << 6) | 0x03 }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { (17 << 6) | 0x3f }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { (6 << 6) | 0x3f }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 
{ CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static u32 sparc_vt_read_pmc(int idx) +{ + u64 val = pcr_ops->read_pic(idx); + + return val & 0xffffffff; +} + +static void sparc_vt_write_pmc(int idx, u64 val) +{ + u64 pcr; + + pcr = pcr_ops->read_pcr(idx); + /* ensure ov and ntc are reset */ + pcr &= ~(PCR_N4_OV | PCR_N4_NTC); + + pcr_ops->write_pic(idx, val & 0xffffffff); + + pcr_ops->write_pcr(idx, pcr); +} + +static const struct sparc_pmu niagara4_pmu = { + .event_map = niagara4_event_map, + .cache_map = &niagara4_cache_map, + .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), + .read_pmc = sparc_vt_read_pmc, + .write_pmc = sparc_vt_write_pmc, + .upper_shift = 5, + .lower_shift = 5, + .event_mask = 0x7ff, + .user_bit = PCR_N4_UTRACE, + .priv_bit = PCR_N4_STRACE, + + /* We explicitly don't support hypervisor tracing. The T4 + * generates the overflow event for precise events via a trap + * which will not be generated (ie. it's completely lost) if + * we happen to be in the hypervisor when the event triggers. + * Essentially, the overflow event reporting is completely + * unusable when you have hypervisor mode tracing enabled. + */ + .hv_bit = 0, + + .irq_bit = PCR_N4_TOE, + .upper_nop = 0, + .lower_nop = 0, + .flags = 0, + .max_hw_events = 4, + .num_pcrs = 4, + .num_pic_regs = 4, +}; + +static const struct sparc_pmu sparc_m7_pmu = { + .event_map = niagara4_event_map, + .cache_map = &niagara4_cache_map, + .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), + .read_pmc = sparc_vt_read_pmc, + .write_pmc = sparc_vt_write_pmc, + .upper_shift = 5, + .lower_shift = 5, + .event_mask = 0x7ff, + .user_bit = PCR_N4_UTRACE, + .priv_bit = PCR_N4_STRACE, + + /* We explicitly don't support hypervisor tracing. */ + .hv_bit = 0, + + .irq_bit = PCR_N4_TOE, + .upper_nop = 0, + .lower_nop = 0, + .flags = 0, + .max_hw_events = 4, + .num_pcrs = 4, + .num_pic_regs = 4, +}; +static const struct sparc_pmu *sparc_pmu __read_mostly; + +static u64 event_encoding(u64 event_id, int idx) +{ + if (idx == PIC_UPPER_INDEX) + event_id <<= sparc_pmu->upper_shift; + else + event_id <<= sparc_pmu->lower_shift; + return event_id; +} + +static u64 mask_for_index(int idx) +{ + return event_encoding(sparc_pmu->event_mask, idx); +} + +static u64 nop_for_index(int idx) +{ + return event_encoding(idx == PIC_UPPER_INDEX ? 
+ sparc_pmu->upper_nop : + sparc_pmu->lower_nop, idx); +} + +static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) +{ + u64 enc, val, mask = mask_for_index(idx); + int pcr_index = 0; + + if (sparc_pmu->num_pcrs > 1) + pcr_index = idx; + + enc = perf_event_get_enc(cpuc->events[idx]); + + val = cpuc->pcr[pcr_index]; + val &= ~mask; + val |= event_encoding(enc, idx); + cpuc->pcr[pcr_index] = val; + + pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); +} + +static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) +{ + u64 mask = mask_for_index(idx); + u64 nop = nop_for_index(idx); + int pcr_index = 0; + u64 val; + + if (sparc_pmu->num_pcrs > 1) + pcr_index = idx; + + val = cpuc->pcr[pcr_index]; + val &= ~mask; + val |= nop; + cpuc->pcr[pcr_index] = val; + + pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); +} + +static u64 sparc_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + int shift = 64 - 32; + u64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sparc_pmu->read_pmc(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static int sparc_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ + if (unlikely(period != hwc->last_period)) + left = period - (hwc->last_period - left); + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + if (left > MAX_PERIOD) + left = MAX_PERIOD; + + local64_set(&hwc->prev_count, (u64)-left); + + sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static void read_in_all_counters(struct cpu_hw_events *cpuc) +{ + int i; + + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *cp = cpuc->event[i]; + + if (cpuc->current_idx[i] != PIC_NO_INDEX && + cpuc->current_idx[i] != cp->hw.idx) { + sparc_perf_event_update(cp, &cp->hw, + cpuc->current_idx[i]); + cpuc->current_idx[i] = PIC_NO_INDEX; + if (cp->hw.state & PERF_HES_STOPPED) + cp->hw.state |= PERF_HES_ARCH; + } + } +} + +/* On this PMU all PICs are programmed using a single PCR. Calculate + * the combined control register value. + * + * For such chips we require that all of the events have the same + * configuration, so just fetch the settings from the first entry. + */ +static void calculate_single_pcr(struct cpu_hw_events *cpuc) +{ + int i; + + if (!cpuc->n_added) + goto out; + + /* Assign to counters all unassigned events. 
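+	 * An event that was added in the stopped state (PERF_HES_ARCH
+	 * set) is given the nop encoding instead, so its counter ticks
+	 * on the harmless "sw_count" nop event until sparc_pmu_start()
+	 * programs the real encoding.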
+	 */
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		struct hw_perf_event *hwc = &cp->hw;
+		int idx = hwc->idx;
+		u64 enc;
+
+		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+			continue;
+
+		sparc_perf_event_set_period(cp, hwc, idx);
+		cpuc->current_idx[i] = idx;
+
+		enc = perf_event_get_enc(cpuc->events[i]);
+		cpuc->pcr[0] &= ~mask_for_index(idx);
+		if (hwc->state & PERF_HES_ARCH) {
+			cpuc->pcr[0] |= nop_for_index(idx);
+		} else {
+			cpuc->pcr[0] |= event_encoding(enc, idx);
+			hwc->state = 0;
+		}
+	}
+out:
+	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
+/* On this PMU each PIC has its own PCR control register. */
+static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+{
+	int i;
+
+	if (!cpuc->n_added)
+		goto out;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		struct hw_perf_event *hwc = &cp->hw;
+		int idx = hwc->idx;
+
+		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+			continue;
+
+		cpuc->current_idx[i] = idx;
+
+		if (cp->hw.state & PERF_HES_ARCH)
+			continue;
+
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
+	}
+out:
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		int idx = cp->hw.idx;
+
+		cpuc->pcr[idx] |= cp->hw.config_base;
+	}
+}
+
+/* If performance event entries have been added, move existing events
+ * around (if necessary) and then assign new entries to counters.
+ */
+static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
+{
+	if (cpuc->n_added)
+		read_in_all_counters(cpuc);
+
+	if (sparc_pmu->num_pcrs == 1) {
+		calculate_single_pcr(cpuc);
+	} else {
+		calculate_multiple_pcrs(cpuc);
+	}
+}
+
+static void sparc_pmu_enable(struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int i;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	if (cpuc->n_events)
+		update_pcrs_for_enable(cpuc);
+
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		pcr_ops->write_pcr(i, cpuc->pcr[i]);
+}
+
+static void sparc_pmu_disable(struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int i;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	cpuc->n_added = 0;
+
+	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
+		u64 val = cpuc->pcr[i];
+
+		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
+			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
+		cpuc->pcr[i] = val;
+		pcr_ops->write_pcr(i, cpuc->pcr[i]);
+	}
+}
+
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+	perf_event_update_userpage(event);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE)
&& (flags & PERF_EF_UPDATE)) { + sparc_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void sparc_pmu_del(struct perf_event *event, int _flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + unsigned long flags; + int i; + + local_irq_save(flags); + + for (i = 0; i < cpuc->n_events; i++) { + if (event == cpuc->event[i]) { + /* Absorb the final count and turn off the + * event. + */ + sparc_pmu_stop(event, PERF_EF_UPDATE); + + /* Shift remaining entries down into + * the existing slot. + */ + while (++i < cpuc->n_events) { + cpuc->event[i - 1] = cpuc->event[i]; + cpuc->events[i - 1] = cpuc->events[i]; + cpuc->current_idx[i - 1] = + cpuc->current_idx[i]; + } + + perf_event_update_userpage(event); + + cpuc->n_events--; + break; + } + } + + local_irq_restore(flags); +} + +static void sparc_pmu_read(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx = active_event_index(cpuc, event); + struct hw_perf_event *hwc = &event->hw; + + sparc_perf_event_update(event, hwc, idx); +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmc_grab_mutex); + +static void perf_stop_nmi_watchdog(void *unused) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int i; + + stop_nmi_watchdog(NULL); + for (i = 0; i < sparc_pmu->num_pcrs; i++) + cpuc->pcr[i] = pcr_ops->read_pcr(i); +} + +static void perf_event_grab_pmc(void) +{ + if (atomic_inc_not_zero(&active_events)) + return; + + mutex_lock(&pmc_grab_mutex); + if (atomic_read(&active_events) == 0) { + if (atomic_read(&nmi_active) > 0) { + on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); + BUG_ON(atomic_read(&nmi_active) != 0); + } + atomic_inc(&active_events); + } + mutex_unlock(&pmc_grab_mutex); +} + +static void perf_event_release_pmc(void) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { + if (atomic_read(&nmi_active) == 0) + on_each_cpu(start_nmi_watchdog, NULL, 1); + mutex_unlock(&pmc_grab_mutex); + } +} + +static const struct perf_event_map *sparc_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct perf_event_map *pmap; + + if (!sparc_pmu->cache_map) + return ERR_PTR(-ENOENT); + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); + + if (pmap->encoding == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + if (pmap->encoding == CACHE_OP_NONSENSE) + return ERR_PTR(-EINVAL); + + return pmap; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + perf_event_release_pmc(); +} + +/* Make sure all events can be scheduled into the hardware at + * the same time. This is simplified by the fact that we only + * need to support 2 simultaneous HW events. + * + * As a side effect, the evts[]->hw.idx values will be assigned + * on success. These are pending indexes. When the events are + * actually programmed into the chip, these values will propagate + * to the per-cpu cpuc->current_idx[] slots, see the code in + * maybe_change_configuration() for details. 
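+ *
+ * For example, on a SPARC_PMU_HAS_CONFLICTS chip, if event0 is
+ * restricted to PIC_LOWER while event1 may use either counter, the
+ * code below settles on idx0 == 1: event0 takes the lower counter
+ * and event1 gets idx0 ^ 1 == 0, the upper one.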
+ */ +static int sparc_check_constraints(struct perf_event **evts, + unsigned long *events, int n_ev) +{ + u8 msk0 = 0, msk1 = 0; + int idx0 = 0; + + /* This case is possible when we are invoked from + * hw_perf_group_sched_in(). + */ + if (!n_ev) + return 0; + + if (n_ev > sparc_pmu->max_hw_events) + return -1; + + if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) { + int i; + + for (i = 0; i < n_ev; i++) + evts[i]->hw.idx = i; + return 0; + } + + msk0 = perf_event_get_msk(events[0]); + if (n_ev == 1) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; + } + BUG_ON(n_ev != 2); + msk1 = perf_event_get_msk(events[1]); + + /* If both events can go on any counter, OK. */ + if (msk0 == (PIC_UPPER | PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) + goto success; + + /* If one event is limited to a specific counter, + * and the other can go on both, OK. + */ + if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; + } + + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && + msk0 == (PIC_UPPER | PIC_LOWER)) { + if (msk1 & PIC_UPPER) + idx0 = 1; + goto success; + } + + /* If the events are fixed to different counters, OK. */ + if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || + (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; + } + + /* Otherwise, there is a conflict. */ + return -1; + +success: + evts[0]->hw.idx = idx0; + if (n_ev == 2) + evts[1]->hw.idx = idx0 ^ 1; + return 0; +} + +static int check_excludes(struct perf_event **evts, int n_prev, int n_new) +{ + int eu = 0, ek = 0, eh = 0; + struct perf_event *event; + int i, n, first; + + if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME)) + return 0; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + first = 1; + for (i = 0; i < n; i++) { + event = evts[i]; + if (first) { + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; + first = 0; + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { + return -EAGAIN; + } + } + + return 0; +} + +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *evts[], unsigned long *events, + int *current_idx) +{ + struct perf_event *event; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + evts[n] = group; + events[n] = group->hw.event_base; + current_idx[n++] = PIC_NO_INDEX; + } + for_each_sibling_event(event, group) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + evts[n] = event; + events[n] = event->hw.event_base; + current_idx[n++] = PIC_NO_INDEX; + } + } + return n; +} + +static int sparc_pmu_add(struct perf_event *event, int ef_flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int n0, ret = -EAGAIN; + unsigned long flags; + + local_irq_save(flags); + + n0 = cpuc->n_events; + if (n0 >= sparc_pmu->max_hw_events) + goto out; + + cpuc->event[n0] = event; + cpuc->events[n0] = event->hw.event_base; + cpuc->current_idx[n0] = PIC_NO_INDEX; + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(ef_flags & PERF_EF_START)) + event->hw.state |= PERF_HES_ARCH; + + /* + * If group events scheduling transaction was started, + * skip the schedulability test here, it will be performed + * at commit time(->commit_txn) as a whole + */ + if (cpuc->txn_flags & PERF_PMU_TXN_ADD) + goto nocheck; + + if 
(check_excludes(cpuc->event, n0, 1)) + goto out; + if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) + goto out; + +nocheck: + cpuc->n_events++; + cpuc->n_added++; + + ret = 0; +out: + local_irq_restore(flags); + return ret; +} + +static int sparc_pmu_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct perf_event *evts[MAX_HWEVENTS]; + struct hw_perf_event *hwc = &event->hw; + unsigned long events[MAX_HWEVENTS]; + int current_idx_dmy[MAX_HWEVENTS]; + const struct perf_event_map *pmap; + int n; + + if (atomic_read(&nmi_active) < 0) + return -ENODEV; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (attr->type) { + case PERF_TYPE_HARDWARE: + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + pmap = sparc_pmu->event_map(attr->config); + break; + + case PERF_TYPE_HW_CACHE: + pmap = sparc_map_cache_event(attr->config); + if (IS_ERR(pmap)) + return PTR_ERR(pmap); + break; + + case PERF_TYPE_RAW: + pmap = NULL; + break; + + default: + return -ENOENT; + + } + + if (pmap) { + hwc->event_base = perf_event_encode(pmap); + } else { + /* + * User gives us "(encoding << 16) | pic_mask" for + * PERF_TYPE_RAW events. + */ + hwc->event_base = attr->config; + } + + /* We save the enable bits in the config_base. */ + hwc->config_base = sparc_pmu->irq_bit; + if (!attr->exclude_user) + hwc->config_base |= sparc_pmu->user_bit; + if (!attr->exclude_kernel) + hwc->config_base |= sparc_pmu->priv_bit; + if (!attr->exclude_hv) + hwc->config_base |= sparc_pmu->hv_bit; + + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + sparc_pmu->max_hw_events - 1, + evts, events, current_idx_dmy); + if (n < 0) + return -EINVAL; + } + events[n] = hwc->event_base; + evts[n] = event; + + if (check_excludes(evts, n, 1)) + return -EINVAL; + + if (sparc_check_constraints(evts, events, n + 1)) + return -EINVAL; + + hwc->idx = PIC_NO_INDEX; + + /* Try to do all error checking before this point, as unwinding + * state after grabbing the PMC is difficult. + */ + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + + if (!hwc->sample_period) { + hwc->sample_period = MAX_PERIOD; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +/* + * Start group events scheduling transaction + * Set the flag to make pmu::enable() not perform the + * schedulability test, it will be performed at commit time + */ +static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) +{ + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + + WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ + + cpuhw->txn_flags = txn_flags; + if (txn_flags & ~PERF_PMU_TXN_ADD) + return; + + perf_pmu_disable(pmu); +} + +/* + * Stop group events scheduling transaction + * Clear the flag and pmu::enable() will perform the + * schedulability test. 
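+ *
+ * The perf core drives these three hooks roughly as follows:
+ *
+ *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
+ *	pmu->add(leader, 0);		// no per-event check
+ *	pmu->add(sibling, 0);
+ *	if (pmu->commit_txn(pmu))	// group checked as a whole
+ *		pmu->cancel_txn(pmu);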
+ */ +static void sparc_pmu_cancel_txn(struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + unsigned int txn_flags; + + WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ + + txn_flags = cpuhw->txn_flags; + cpuhw->txn_flags = 0; + if (txn_flags & ~PERF_PMU_TXN_ADD) + return; + + perf_pmu_enable(pmu); +} + +/* + * Commit group events scheduling transaction + * Perform the group schedulability test as a whole + * Return 0 if success + */ +static int sparc_pmu_commit_txn(struct pmu *pmu) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int n; + + if (!sparc_pmu) + return -EINVAL; + + WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ + + if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { + cpuc->txn_flags = 0; + return 0; + } + + n = cpuc->n_events; + if (check_excludes(cpuc->event, 0, n)) + return -EINVAL; + if (sparc_check_constraints(cpuc->event, cpuc->events, n)) + return -EAGAIN; + + cpuc->txn_flags = 0; + perf_pmu_enable(pmu); + return 0; +} + +static struct pmu pmu = { + .pmu_enable = sparc_pmu_enable, + .pmu_disable = sparc_pmu_disable, + .event_init = sparc_pmu_event_init, + .add = sparc_pmu_add, + .del = sparc_pmu_del, + .start = sparc_pmu_start, + .stop = sparc_pmu_stop, + .read = sparc_pmu_read, + .start_txn = sparc_pmu_start_txn, + .cancel_txn = sparc_pmu_cancel_txn, + .commit_txn = sparc_pmu_commit_txn, +}; + +void perf_event_print_debug(void) +{ + unsigned long flags; + int cpu, i; + + if (!sparc_pmu) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pr_info("\n"); + for (i = 0; i < sparc_pmu->num_pcrs; i++) + pr_info("CPU#%d: PCR%d[%016llx]\n", + cpu, i, pcr_ops->read_pcr(i)); + for (i = 0; i < sparc_pmu->num_pic_regs; i++) + pr_info("CPU#%d: PIC%d[%016llx]\n", + cpu, i, pcr_ops->read_pic(i)); + + local_irq_restore(flags); +} + +static int __kprobes perf_event_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + u64 finish_clock; + u64 start_clock; + int i; + + if (!atomic_read(&active_events)) + return NOTIFY_DONE; + + switch (cmd) { + case DIE_NMI: + break; + + default: + return NOTIFY_DONE; + } + + start_clock = sched_clock(); + + regs = args->regs; + + cpuc = this_cpu_ptr(&cpu_hw_events); + + /* If the PMU has the TOE IRQ enable bits, we need to do a + * dummy write to the %pcr to clear the overflow bits and thus + * the interrupt. + * + * Do this before we peek at the counters to determine + * overflow so we don't lose any events. 
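+	 *
+	 * A counter that has really wrapped will have cleared bit 31
+	 * (it was seeded with a negative 32-bit value), which is why
+	 * the loop below skips any counter whose updated value still
+	 * has bit 31 set.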
+ */ + if (sparc_pmu->irq_bit && + sparc_pmu->num_pcrs == 1) + pcr_ops->write_pcr(0, cpuc->pcr[0]); + + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *event = cpuc->event[i]; + int idx = cpuc->current_idx[i]; + struct hw_perf_event *hwc; + u64 val; + + if (sparc_pmu->irq_bit && + sparc_pmu->num_pcrs > 1) + pcr_ops->write_pcr(idx, cpuc->pcr[idx]); + + hwc = &event->hw; + val = sparc_perf_event_update(event, hwc, idx); + if (val & (1ULL << 31)) + continue; + + perf_sample_data_init(&data, 0, hwc->last_period); + if (!sparc_perf_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, &data, regs)) + sparc_pmu_stop(event, 0); + } + + finish_clock = sched_clock(); + + perf_sample_event_took(finish_clock - start_clock); + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_event_nmi_notifier = { + .notifier_call = perf_event_nmi_handler, +}; + +static bool __init supported_pmu(void) +{ + if (!strcmp(sparc_pmu_type, "ultra3") || + !strcmp(sparc_pmu_type, "ultra3+") || + !strcmp(sparc_pmu_type, "ultra3i") || + !strcmp(sparc_pmu_type, "ultra4+")) { + sparc_pmu = &ultra3_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara")) { + sparc_pmu = &niagara1_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara2") || + !strcmp(sparc_pmu_type, "niagara3")) { + sparc_pmu = &niagara2_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara4") || + !strcmp(sparc_pmu_type, "niagara5")) { + sparc_pmu = &niagara4_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "sparc-m7")) { + sparc_pmu = &sparc_m7_pmu; + return true; + } + return false; +} + +static int __init init_hw_perf_events(void) +{ + int err; + + pr_info("Performance events: "); + + err = pcr_arch_init(); + if (err || !supported_pmu()) { + pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); + return 0; + } + + pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + register_die_notifier(&perf_event_nmi_notifier); + + return 0; +} +pure_initcall(init_hw_perf_events); + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long ksp, fp; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + int graph = 0; +#endif + + stack_trace_flush(); + + perf_callchain_store(entry, regs->tpc); + + ksp = regs->u_regs[UREG_I6]; + fp = ksp + STACK_BIAS; + do { + struct sparc_stackf *sf; + struct pt_regs *regs; + unsigned long pc; + + if (!kstack_valid(current_thread_info(), fp)) + break; + + sf = (struct sparc_stackf *) fp; + regs = (struct pt_regs *) (sf + 1); + + if (kstack_is_trap_frame(current_thread_info(), regs)) { + if (user_mode(regs)) + break; + pc = regs->tpc; + fp = regs->u_regs[UREG_I6] + STACK_BIAS; + } else { + pc = sf->callers_pc; + fp = (unsigned long)sf->fp + STACK_BIAS; + } + perf_callchain_store(entry, pc); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + struct ftrace_ret_stack *ret_stack; + ret_stack = ftrace_graph_get_ret_stack(current, + graph); + if (ret_stack) { + pc = ret_stack->ret; + perf_callchain_store(entry, pc); + graph++; + } + } +#endif + } while (entry->nr < entry->max_stack); +} + +static inline int +valid_user_frame(const void __user *fp, unsigned long size) +{ + /* addresses should be at least 4-byte aligned */ + if (((unsigned long) fp) & 3) + return 0; + + return (__range_not_ok(fp, size, TASK_SIZE) == 0); +} + +static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, + struct 
pt_regs *regs) +{ + unsigned long ufp; + + ufp = regs->u_regs[UREG_FP] + STACK_BIAS; + do { + struct sparc_stackf __user *usf; + struct sparc_stackf sf; + unsigned long pc; + + usf = (struct sparc_stackf __user *)ufp; + if (!valid_user_frame(usf, sizeof(sf))) + break; + + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp + STACK_BIAS; + perf_callchain_store(entry, pc); + } while (entry->nr < entry->max_stack); +} + +static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long ufp; + + ufp = regs->u_regs[UREG_FP] & 0xffffffffUL; + do { + unsigned long pc; + + if (thread32_stack_is_64bit(ufp)) { + struct sparc_stackf __user *usf; + struct sparc_stackf sf; + + ufp += STACK_BIAS; + usf = (struct sparc_stackf __user *)ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc & 0xffffffff; + ufp = ((unsigned long) sf.fp) & 0xffffffff; + } else { + struct sparc_stackf32 __user *usf; + struct sparc_stackf32 sf; + usf = (struct sparc_stackf32 __user *)ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp; + } + perf_callchain_store(entry, pc); + } while (entry->nr < entry->max_stack); +} + +void +perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +{ + u64 saved_fault_address = current_thread_info()->fault_address; + u8 saved_fault_code = get_thread_fault_code(); + + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + + flushw_user(); + + pagefault_disable(); + + if (test_thread_flag(TIF_32BIT)) + perf_callchain_user_32(entry, regs); + else + perf_callchain_user_64(entry, regs); + + pagefault_enable(); + + set_thread_fault_code(saved_fault_code); + current_thread_info()->fault_address = saved_fault_address; +} diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c new file mode 100644 index 000000000..b5c1eb33b --- /dev/null +++ b/arch/sparc/kernel/pmc.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* pmc - Driver implementation for power management functions + * of Power Management Controller (PMC) on SPARCstation-Voyager. 
+ * + * Copyright (c) 2002 Eric Brower (ebrower@usa.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Debug + * + * #define PMC_DEBUG_LED + * #define PMC_NO_IDLE + */ + +#define PMC_OBPNAME "SUNW,pmc" +#define PMC_DEVNAME "pmc" + +#define PMC_IDLE_REG 0x00 +#define PMC_IDLE_ON 0x01 + +static u8 __iomem *regs; + +#define pmc_readb(offs) (sbus_readb(regs+offs)) +#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs)) + +/* + * CPU idle callback function + * See .../arch/sparc/kernel/process.c + */ +static void pmc_swift_idle(void) +{ +#ifdef PMC_DEBUG_LED + set_auxio(0x00, AUXIO_LED); +#endif + + pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG); + +#ifdef PMC_DEBUG_LED + set_auxio(AUXIO_LED, 0x00); +#endif +} + +static int pmc_probe(struct platform_device *op) +{ + regs = of_ioremap(&op->resource[0], 0, + resource_size(&op->resource[0]), PMC_OBPNAME); + if (!regs) { + printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME); + return -ENODEV; + } + +#ifndef PMC_NO_IDLE + /* Assign power management IDLE handler */ + sparc_idle = pmc_swift_idle; +#endif + + printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); + return 0; +} + +static const struct of_device_id pmc_match[] = { + { + .name = PMC_OBPNAME, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, pmc_match); + +static struct platform_driver pmc_driver = { + .driver = { + .name = "pmc", + .of_match_table = pmc_match, + }, + .probe = pmc_probe, +}; + +static int __init pmc_init(void) +{ + return platform_driver_register(&pmc_driver); +} + +/* This driver is not critical to the boot process + * and is easiest to ioremap when SBus is already + * initialized, so we install ourselves thusly: + */ +__initcall(pmc_init); diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c new file mode 100644 index 000000000..d941875dd --- /dev/null +++ b/arch/sparc/kernel/power.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* power.c: Power management driver. + * + * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +static void __iomem *power_reg; + +static irqreturn_t power_handler(int irq, void *dev_id) +{ + orderly_poweroff(true); + + /* FIXME: Check registers for status... 
*/ + return IRQ_HANDLED; +} + +static int has_button_interrupt(unsigned int irq, struct device_node *dp) +{ + if (irq == 0xffffffff) + return 0; + if (!of_find_property(dp, "button", NULL)) + return 0; + + return 1; +} + +static int power_probe(struct platform_device *op) +{ + struct resource *res = &op->resource[0]; + unsigned int irq = op->archdata.irqs[0]; + + power_reg = of_ioremap(res, 0, 0x4, "power"); + + printk(KERN_INFO "%pOFn: Control reg at %llx\n", + op->dev.of_node, res->start); + + if (has_button_interrupt(irq, op->dev.of_node)) { + if (request_irq(irq, + power_handler, 0, "power", NULL) < 0) + printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); + } + + return 0; +} + +static const struct of_device_id power_match[] = { + { + .name = "power", + }, + {}, +}; + +static struct platform_driver power_driver = { + .probe = power_probe, + .driver = { + .name = "power", + .of_match_table = power_match, + }, +}; + +builtin_platform_driver(power_driver); diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c new file mode 100644 index 000000000..0442ab005 --- /dev/null +++ b/arch/sparc/kernel/process.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * This file handles the architecture independent parts of process handling.. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +asmlinkage long sparc_fork(struct pt_regs *regs) +{ + unsigned long orig_i1 = regs->u_regs[UREG_I1]; + long ret; + struct kernel_clone_args args = { + .exit_signal = SIGCHLD, + /* Reuse the parent's stack for the child. */ + .stack = regs->u_regs[UREG_FP], + }; + + ret = kernel_clone(&args); + + /* If we get an error and potentially restart the system + * call, we're screwed because copy_thread() clobbered + * the parent's %o1. So detect that case and restore it + * here. + */ + if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) + regs->u_regs[UREG_I1] = orig_i1; + + return ret; +} + +asmlinkage long sparc_vfork(struct pt_regs *regs) +{ + unsigned long orig_i1 = regs->u_regs[UREG_I1]; + long ret; + + struct kernel_clone_args args = { + .flags = CLONE_VFORK | CLONE_VM, + .exit_signal = SIGCHLD, + /* Reuse the parent's stack for the child. */ + .stack = regs->u_regs[UREG_FP], + }; + + ret = kernel_clone(&args); + + /* If we get an error and potentially restart the system + * call, we're screwed because copy_thread() clobbered + * the parent's %o1. So detect that case and restore it + * here. + */ + if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) + regs->u_regs[UREG_I1] = orig_i1; + + return ret; +} + +asmlinkage long sparc_clone(struct pt_regs *regs) +{ + unsigned long orig_i1 = regs->u_regs[UREG_I1]; + unsigned int flags = lower_32_bits(regs->u_regs[UREG_I0]); + long ret; + + struct kernel_clone_args args = { + .flags = (flags & ~CSIGNAL), + .exit_signal = (flags & CSIGNAL), + .tls = regs->u_regs[UREG_I3], + }; + +#ifdef CONFIG_COMPAT + if (test_thread_flag(TIF_32BIT)) { + args.pidfd = compat_ptr(regs->u_regs[UREG_I2]); + args.child_tid = compat_ptr(regs->u_regs[UREG_I4]); + args.parent_tid = compat_ptr(regs->u_regs[UREG_I2]); + } else +#endif + { + args.pidfd = (int __user *)regs->u_regs[UREG_I2]; + args.child_tid = (int __user *)regs->u_regs[UREG_I4]; + args.parent_tid = (int __user *)regs->u_regs[UREG_I2]; + } + + /* Did userspace give setup a separate stack for the child or are we + * reusing the parent's? 
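+	 * For clone(2) the new stack pointer arrives in %o1; a zero
+	 * there means the child keeps running on the caller's stack,
+	 * just as in the fork()/vfork() paths above.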
+ */ + if (regs->u_regs[UREG_I1]) + args.stack = regs->u_regs[UREG_I1]; + else + args.stack = regs->u_regs[UREG_FP]; + + ret = kernel_clone(&args); + + /* If we get an error and potentially restart the system + * call, we're screwed because copy_thread() clobbered + * the parent's %o1. So detect that case and restore it + * here. + */ + if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) + regs->u_regs[UREG_I1] = orig_i1; + + return ret; +} diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c new file mode 100644 index 000000000..33b0215a4 --- /dev/null +++ b/arch/sparc/kernel/process_32.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* linux/arch/sparc/kernel/process.c + * + * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + */ + +/* + * This file handles the architecture-dependent parts of process handling.. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +/* + * Power management idle function + * Set in pm platform drivers (apc.c and pmc.c) + */ +void (*sparc_idle)(void); + +/* + * Power-off handler instantiation for pm.h compliance + * This is done via auxio, but could be used as a fallback + * handler when auxio is not present-- unused for now... + */ +void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); + +/* + * sysctl - toggle power-off restriction for serial console + * systems in machine_power_off() + */ +int scons_pwroff = 1; + +extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); + +struct task_struct *last_task_used_math = NULL; +struct thread_info *current_set[NR_CPUS]; + +/* Idle loop support. */ +void arch_cpu_idle(void) +{ + if (sparc_idle) + (*sparc_idle)(); + raw_local_irq_enable(); +} + +/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. 
*/ +void machine_halt(void) +{ + local_irq_enable(); + mdelay(8); + local_irq_disable(); + prom_halt(); + panic("Halt failed!"); +} + +void machine_restart(char * cmd) +{ + char *p; + + local_irq_enable(); + mdelay(8); + local_irq_disable(); + + p = strchr (reboot_command, '\n'); + if (p) *p = 0; + if (cmd) + prom_reboot(cmd); + if (*reboot_command) + prom_reboot(reboot_command); + prom_feval ("reset"); + panic("Reboot failed!"); +} + +void machine_power_off(void) +{ + if (auxio_power_register && + (!of_node_is_type(of_console_device, "serial") || scons_pwroff)) { + u8 power_register = sbus_readb(auxio_power_register); + power_register |= AUXIO_POWER_OFF; + sbus_writeb(power_register, auxio_power_register); + } + + machine_halt(); +} + +void show_regs(struct pt_regs *r) +{ + struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; + + show_regs_print_info(KERN_DEFAULT); + + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", + r->psr, r->pc, r->npc, r->y, print_tainted()); + printk("PC: <%pS>\n", (void *) r->pc); + printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], + r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); + printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], + r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); + printk("RPC: <%pS>\n", (void *) r->u_regs[15]); + + printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], + rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]); + printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], + rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); +} + +/* + * The show_stack() is external API which we do not use ourselves. + * The oops is printed in die_if_kernel. + */ +void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl) +{ + unsigned long pc, fp; + unsigned long task_base; + struct reg_window32 *rw; + int count = 0; + + if (!tsk) + tsk = current; + + if (tsk == current && !_ksp) + __asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp)); + + task_base = (unsigned long) task_stack_page(tsk); + fp = (unsigned long) _ksp; + do { + /* Bogus frame pointer? */ + if (fp < (task_base + sizeof(struct thread_info)) || + fp >= (task_base + (PAGE_SIZE << 1))) + break; + rw = (struct reg_window32 *) fp; + pc = rw->ins[7]; + printk("%s[%08lx : ", loglvl, pc); + printk("%s%pS ] ", loglvl, (void *) pc); + fp = rw->ins[6]; + } while (++count < 16); + printk("%s\n", loglvl); +} + +/* + * Free current thread data structures etc.. + */ +void exit_thread(struct task_struct *tsk) +{ +#ifndef CONFIG_SMP + if (last_task_used_math == tsk) { +#else + if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { +#endif + /* Keep process from leaving FPU in a bogon state. */ + put_psr(get_psr() | PSR_EF); + fpsave(&tsk->thread.float_regs[0], &tsk->thread.fsr, + &tsk->thread.fpqueue[0], &tsk->thread.fpqdepth); +#ifndef CONFIG_SMP + last_task_used_math = NULL; +#else + clear_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU); +#endif + } +} + +void flush_thread(void) +{ + current_thread_info()->w_saved = 0; + +#ifndef CONFIG_SMP + if(last_task_used_math == current) { +#else + if (test_thread_flag(TIF_USEDFPU)) { +#endif + /* Clean the fpu. 
*/ + put_psr(get_psr() | PSR_EF); + fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, + ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); +#ifndef CONFIG_SMP + last_task_used_math = NULL; +#else + clear_thread_flag(TIF_USEDFPU); +#endif + } +} + +static inline struct sparc_stackf __user * +clone_stackframe(struct sparc_stackf __user *dst, + struct sparc_stackf __user *src) +{ + unsigned long size, fp; + struct sparc_stackf *tmp; + struct sparc_stackf __user *sp; + + if (get_user(tmp, &src->fp)) + return NULL; + + fp = (unsigned long) tmp; + size = (fp - ((unsigned long) src)); + fp = (unsigned long) dst; + sp = (struct sparc_stackf __user *)(fp - size); + + /* do_fork() grabs the parent semaphore, we must release it + * temporarily so we can build the child clone stack frame + * without deadlocking. + */ + if (__copy_user(sp, src, size)) + sp = NULL; + else if (put_user(fp, &sp->fp)) + sp = NULL; + + return sp; +} + +/* Copy a Sparc thread. The fork() return value conventions + * under SunOS are nothing short of bletcherous: + * Parent --> %o0 == childs pid, %o1 == 0 + * Child --> %o0 == parents pid, %o1 == 1 + * + * NOTE: We have a separate fork kpsr/kwim because + * the parent could change these values between + * sys_fork invocation and when we reach here + * if the parent should sleep while trying to + * allocate the task_struct and kernel stack in + * do_fork(). + * XXX See comment above sys_vfork in sparc64. todo. + */ +extern void ret_from_fork(void); +extern void ret_from_kernel_thread(void); + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long sp = args->stack; + unsigned long tls = args->tls; + struct thread_info *ti = task_thread_info(p); + struct pt_regs *childregs, *regs = current_pt_regs(); + char *new_stack; + +#ifndef CONFIG_SMP + if(last_task_used_math == current) { +#else + if (test_thread_flag(TIF_USEDFPU)) { +#endif + put_psr(get_psr() | PSR_EF); + fpsave(&p->thread.float_regs[0], &p->thread.fsr, + &p->thread.fpqueue[0], &p->thread.fpqdepth); + } + + /* + * p->thread_info new_stack childregs stack bottom + * ! ! ! ! + * V V (stk.fr.) V (pt_regs) V + * +----- - - - - - ------+===========+=============+ + */ + new_stack = task_stack_page(p) + THREAD_SIZE; + new_stack -= STACKFRAME_SZ + TRACEREG_SZ; + childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); + + /* + * A new process must start with interrupts disabled, see schedule_tail() + * and finish_task_switch(). (If we do not do it and if a timer interrupt + * hits before we unlock and attempts to take the rq->lock, we deadlock.) + * + * Thus, kpsr |= PSR_PIL. 
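+ *
+ * (PSR_PIL is the full 4-bit processor interrupt level field, so
+ * OR-ing it in raises the child's saved PIL to 15 and keeps all
+ * maskable interrupts off until schedule_tail() has run and the
+ * normal return path restores a working PSR.)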
+ */ + ti->ksp = (unsigned long) new_stack; + p->thread.kregs = childregs; + + if (unlikely(args->fn)) { + extern int nwindows; + unsigned long psr; + memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); + ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8); + childregs->u_regs[UREG_G1] = (unsigned long) args->fn; + childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg; + psr = childregs->psr = get_psr(); + ti->kpsr = psr | PSR_PIL; + ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows); + return 0; + } + memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); + childregs->u_regs[UREG_FP] = sp; + ti->kpc = (((unsigned long) ret_from_fork) - 0x8); + ti->kpsr = current->thread.fork_kpsr | PSR_PIL; + ti->kwim = current->thread.fork_kwim; + + if (sp != regs->u_regs[UREG_FP]) { + struct sparc_stackf __user *childstack; + struct sparc_stackf __user *parentstack; + + /* + * This is a clone() call with supplied user stack. + * Set some valid stack frames to give to the child. + */ + childstack = (struct sparc_stackf __user *) + (sp & ~0xfUL); + parentstack = (struct sparc_stackf __user *) + regs->u_regs[UREG_FP]; + +#if 0 + printk("clone: parent stack:\n"); + show_stackframe(parentstack); +#endif + + childstack = clone_stackframe(childstack, parentstack); + if (!childstack) + return -EFAULT; + +#if 0 + printk("clone: child stack:\n"); + show_stackframe(childstack); +#endif + + childregs->u_regs[UREG_FP] = (unsigned long)childstack; + } + +#ifdef CONFIG_SMP + /* FPU must be disabled on SMP. */ + childregs->psr &= ~PSR_EF; + clear_tsk_thread_flag(p, TIF_USEDFPU); +#endif + + /* Set the return value for the child. */ + childregs->u_regs[UREG_I0] = current->pid; + childregs->u_regs[UREG_I1] = 1; + + /* Set the return value for the parent. */ + regs->u_regs[UREG_I1] = 0; + + if (clone_flags & CLONE_SETTLS) + childregs->u_regs[UREG_G7] = tls; + + return 0; +} + +unsigned long __get_wchan(struct task_struct *task) +{ + unsigned long pc, fp, bias = 0; + unsigned long task_base = (unsigned long) task; + unsigned long ret = 0; + struct reg_window32 *rw; + int count = 0; + + fp = task_thread_info(task)->ksp + bias; + do { + /* Bogus frame pointer? */ + if (fp < (task_base + sizeof(struct thread_info)) || + fp >= (task_base + (2 * PAGE_SIZE))) + break; + rw = (struct reg_window32 *) fp; + pc = rw->ins[7]; + if (!in_sched_functions(pc)) { + ret = pc; + goto out; + } + fp = rw->ins[6] + bias; + } while (++count < 16); + +out: + return ret; +} + diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c new file mode 100644 index 000000000..6335b698a --- /dev/null +++ b/arch/sparc/kernel/process_64.c @@ -0,0 +1,682 @@ +// SPDX-License-Identifier: GPL-2.0 +/* arch/sparc64/kernel/process.c + * + * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +/* + * This file handles the architecture-dependent parts of process handling.. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kstack.h" + +/* Idle loop support on sparc64. 
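+ *
+ * Two flavours here: sun4u simply re-enables interrupts and spins
+ * (touching the NMI watchdog), while sun4v parks the cpu in the
+ * hypervisor via sun4v_cpu_yield(), which requires PSTATE.IE to be
+ * clear across the call.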
*/ +void arch_cpu_idle(void) +{ + if (tlb_type != hypervisor) { + touch_nmi_watchdog(); + raw_local_irq_enable(); + } else { + unsigned long pstate; + + raw_local_irq_enable(); + + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over + * the cpu sleep hypervisor call. + */ + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "andn %0, %1, %0\n\t" + "wrpr %0, %%g0, %%pstate" + : "=&r" (pstate) + : "i" (PSTATE_IE)); + + if (!need_resched() && !cpu_is_offline(smp_processor_id())) { + sun4v_cpu_yield(); + /* If resumed by cpu_poke then we need to explicitly + * call scheduler_ipi(). + */ + scheduler_poke(); + } + + /* Re-enable interrupts. */ + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "or %0, %1, %0\n\t" + "wrpr %0, %%g0, %%pstate" + : "=&r" (pstate) + : "i" (PSTATE_IE)); + } +} + +#ifdef CONFIG_HOTPLUG_CPU +void arch_cpu_idle_dead(void) +{ + sched_preempt_enable_no_resched(); + cpu_play_dead(); +} +#endif + +#ifdef CONFIG_COMPAT +static void show_regwindow32(struct pt_regs *regs) +{ + struct reg_window32 __user *rw; + struct reg_window32 r_w; + + __asm__ __volatile__ ("flushw"); + rw = compat_ptr((unsigned int)regs->u_regs[14]); + if (copy_from_user (&r_w, rw, sizeof(r_w))) { + return; + } + + printk("l0: %08x l1: %08x l2: %08x l3: %08x " + "l4: %08x l5: %08x l6: %08x l7: %08x\n", + r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3], + r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]); + printk("i0: %08x i1: %08x i2: %08x i3: %08x " + "i4: %08x i5: %08x i6: %08x i7: %08x\n", + r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3], + r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]); +} +#else +#define show_regwindow32(regs) do { } while (0) +#endif + +static void show_regwindow(struct pt_regs *regs) +{ + struct reg_window __user *rw; + struct reg_window *rwk; + struct reg_window r_w; + + if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) { + __asm__ __volatile__ ("flushw"); + rw = (struct reg_window __user *) + (regs->u_regs[14] + STACK_BIAS); + rwk = (struct reg_window *) + (regs->u_regs[14] + STACK_BIAS); + if (!(regs->tstate & TSTATE_PRIV)) { + if (copy_from_user (&r_w, rw, sizeof(r_w))) { + return; + } + rwk = &r_w; + } + } else { + show_regwindow32(regs); + return; + } + printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n", + rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]); + printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n", + rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]); + printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n", + rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]); + printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", + rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); + if (regs->tstate & TSTATE_PRIV) + printk("I7: <%pS>\n", (void *) rwk->ins[7]); +} + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, + regs->tpc, regs->tnpc, regs->y, print_tainted()); + printk("TPC: <%pS>\n", (void *) regs->tpc); + printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", + regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], + regs->u_regs[3]); + printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n", + regs->u_regs[4], regs->u_regs[5], regs->u_regs[6], + regs->u_regs[7]); + printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n", + regs->u_regs[8], regs->u_regs[9], regs->u_regs[10], + regs->u_regs[11]); + printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", + 
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], + regs->u_regs[15]); + printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); + show_regwindow(regs); + show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT); +} + +union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; +static DEFINE_SPINLOCK(global_cpu_snapshot_lock); + +static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, + int this_cpu) +{ + struct global_reg_snapshot *rp; + + flushw_all(); + + rp = &global_cpu_snapshot[this_cpu].reg; + + rp->tstate = regs->tstate; + rp->tpc = regs->tpc; + rp->tnpc = regs->tnpc; + rp->o7 = regs->u_regs[UREG_I7]; + + if (regs->tstate & TSTATE_PRIV) { + struct reg_window *rw; + + rw = (struct reg_window *) + (regs->u_regs[UREG_FP] + STACK_BIAS); + if (kstack_valid(tp, (unsigned long) rw)) { + rp->i7 = rw->ins[7]; + rw = (struct reg_window *) + (rw->ins[6] + STACK_BIAS); + if (kstack_valid(tp, (unsigned long) rw)) + rp->rpc = rw->ins[7]; + } + } else { + rp->i7 = 0; + rp->rpc = 0; + } + rp->thread = tp; +} + +/* In order to avoid hangs we do not try to synchronize with the + * global register dump client cpus. The last store they make is to + * the thread pointer, so do a short poll waiting for that to become + * non-NULL. + */ +static void __global_reg_poll(struct global_reg_snapshot *gp) +{ + int limit = 0; + + while (!gp->thread && ++limit < 100) { + barrier(); + udelay(1); + } +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + struct thread_info *tp = current_thread_info(); + struct pt_regs *regs = get_irq_regs(); + unsigned long flags; + int this_cpu, cpu; + + if (!regs) + regs = tp->kregs; + + spin_lock_irqsave(&global_cpu_snapshot_lock, flags); + + this_cpu = raw_smp_processor_id(); + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + if (cpumask_test_cpu(this_cpu, mask) && !exclude_self) + __global_reg_self(tp, regs, this_cpu); + + smp_fetch_global_regs(); + + for_each_cpu(cpu, mask) { + struct global_reg_snapshot *gp; + + if (exclude_self && cpu == this_cpu) + continue; + + gp = &global_cpu_snapshot[cpu].reg; + + __global_reg_poll(gp); + + tp = gp->thread; + printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n", + (cpu == this_cpu ? '*' : ' '), cpu, + gp->tstate, gp->tpc, gp->tnpc, + ((tp && tp->task) ? tp->task->comm : "NULL"), + ((tp && tp->task) ? 
tp->task->pid : -1)); + + if (gp->tstate & TSTATE_PRIV) { + printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", + (void *) gp->tpc, + (void *) gp->o7, + (void *) gp->i7, + (void *) gp->rpc); + } else { + printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", + gp->tpc, gp->o7, gp->i7, gp->rpc); + } + + touch_nmi_watchdog(); + } + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); +} + +#ifdef CONFIG_MAGIC_SYSRQ + +static void sysrq_handle_globreg(int key) +{ + trigger_all_cpu_backtrace(); +} + +static const struct sysrq_key_op sparc_globalreg_op = { + .handler = sysrq_handle_globreg, + .help_msg = "global-regs(y)", + .action_msg = "Show Global CPU Regs", +}; + +static void __global_pmu_self(int this_cpu) +{ + struct global_pmu_snapshot *pp; + int i, num; + + if (!pcr_ops) + return; + + pp = &global_cpu_snapshot[this_cpu].pmu; + + num = 1; + if (tlb_type == hypervisor && + sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) + num = 4; + + for (i = 0; i < num; i++) { + pp->pcr[i] = pcr_ops->read_pcr(i); + pp->pic[i] = pcr_ops->read_pic(i); + } +} + +static void __global_pmu_poll(struct global_pmu_snapshot *pp) +{ + int limit = 0; + + while (!pp->pcr[0] && ++limit < 100) { + barrier(); + udelay(1); + } +} + +static void pmu_snapshot_all_cpus(void) +{ + unsigned long flags; + int this_cpu, cpu; + + spin_lock_irqsave(&global_cpu_snapshot_lock, flags); + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + this_cpu = raw_smp_processor_id(); + + __global_pmu_self(this_cpu); + + smp_fetch_global_pmu(); + + for_each_online_cpu(cpu) { + struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu; + + __global_pmu_poll(pp); + + printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n", + (cpu == this_cpu ? '*' : ' '), cpu, + pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], + pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); + + touch_nmi_watchdog(); + } + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); +} + +static void sysrq_handle_globpmu(int key) +{ + pmu_snapshot_all_cpus(); +} + +static const struct sysrq_key_op sparc_globalpmu_op = { + .handler = sysrq_handle_globpmu, + .help_msg = "global-pmu(x)", + .action_msg = "Show Global PMU Regs", +}; + +static int __init sparc_sysrq_init(void) +{ + int ret = register_sysrq_key('y', &sparc_globalreg_op); + + if (!ret) + ret = register_sysrq_key('x', &sparc_globalpmu_op); + return ret; +} + +core_initcall(sparc_sysrq_init); + +#endif + +/* Free current thread data structures etc.. */ +void exit_thread(struct task_struct *tsk) +{ + struct thread_info *t = task_thread_info(tsk); + + if (t->utraps) { + if (t->utraps[0] < 2) + kfree (t->utraps); + else + t->utraps[0]--; + } +} + +void flush_thread(void) +{ + struct thread_info *t = current_thread_info(); + struct mm_struct *mm; + + mm = t->task->mm; + if (mm) + tsb_context_switch(mm); + + set_thread_wsaved(0); + + /* Clear FPU register state. */ + t->fpsaved[0] = 0; +} + +/* It's a bit more tricky when 64-bit tasks are involved... 
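+ *
+ * A 64-bit user stack is offset by STACK_BIAS (2047) and its frames
+ * are the wider struct reg_window, while 32-bit frames are struct
+ * reg_window32 with no bias. clone_stackframe() therefore probes
+ * the flavour with test_thread_64bit_stack() before copying, and
+ * writes back a biased or zero-extended frame pointer accordingly.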
*/ +static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) +{ + bool stack_64bit = test_thread_64bit_stack(psp); + unsigned long fp, distance, rval; + + if (stack_64bit) { + csp += STACK_BIAS; + psp += STACK_BIAS; + __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); + fp += STACK_BIAS; + if (test_thread_flag(TIF_32BIT)) + fp &= 0xffffffff; + } else + __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); + + /* Now align the stack as this is mandatory in the Sparc ABI + * due to how register windows work. This hides the + * restriction from thread libraries etc. + */ + csp &= ~15UL; + + distance = fp - psp; + rval = (csp - distance); + if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance)) + rval = 0; + else if (!stack_64bit) { + if (put_user(((u32)csp), + &(((struct reg_window32 __user *)rval)->ins[6]))) + rval = 0; + } else { + if (put_user(((u64)csp - STACK_BIAS), + &(((struct reg_window __user *)rval)->ins[6]))) + rval = 0; + else + rval = rval - STACK_BIAS; + } + + return rval; +} + +/* Standard stuff. */ +static inline void shift_window_buffer(int first_win, int last_win, + struct thread_info *t) +{ + int i; + + for (i = first_win; i < last_win; i++) { + t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1]; + memcpy(&t->reg_window[i], &t->reg_window[i+1], + sizeof(struct reg_window)); + } +} + +void synchronize_user_stack(void) +{ + struct thread_info *t = current_thread_info(); + unsigned long window; + + flush_user_windows(); + if ((window = get_thread_wsaved()) != 0) { + window -= 1; + do { + struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp; + + sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); + + if (!copy_to_user((char __user *)sp, rwin, winsize)) { + shift_window_buffer(window, get_thread_wsaved() - 1, t); + set_thread_wsaved(get_thread_wsaved() - 1); + } + } while (window--); + } +} + +static void stack_unaligned(unsigned long sp) +{ + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp); +} + +static const char uwfault32[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n"; +static const char uwfault64[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n"; + +void fault_in_user_windows(struct pt_regs *regs) +{ + struct thread_info *t = current_thread_info(); + unsigned long window; + + flush_user_windows(); + window = get_thread_wsaved(); + + if (likely(window != 0)) { + window -= 1; + do { + struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp, orig_sp; + + orig_sp = sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); + + if (unlikely(sp & 0x7UL)) + stack_unaligned(sp); + + if (unlikely(copy_to_user((char __user *)sp, + rwin, winsize))) { + if (show_unhandled_signals) + printk_ratelimited(is_compat_task() ? + uwfault32 : uwfault64, + current->comm, current->pid, + sp, orig_sp, + regs->tpc, + regs->u_regs[UREG_I7]); + goto barf; + } + } while (window--); + } + set_thread_wsaved(0); + return; + +barf: + set_thread_wsaved(window + 1); + force_sig(SIGSEGV); +} + +/* Copy a Sparc thread. 
The fork() return value conventions + * under SunOS are nothing short of bletcherous: + * Parent --> %o0 == childs pid, %o1 == 0 + * Child --> %o0 == parents pid, %o1 == 1 + */ +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long sp = args->stack; + unsigned long tls = args->tls; + struct thread_info *t = task_thread_info(p); + struct pt_regs *regs = current_pt_regs(); + struct sparc_stackf *parent_sf; + unsigned long child_stack_sz; + char *child_trap_frame; + + /* Calculate offset to stack_frame & pt_regs */ + child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ); + child_trap_frame = (task_stack_page(p) + + (THREAD_SIZE - child_stack_sz)); + + t->new_child = 1; + t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; + t->kregs = (struct pt_regs *) (child_trap_frame + + sizeof(struct sparc_stackf)); + t->fpsaved[0] = 0; + + if (unlikely(args->fn)) { + memset(child_trap_frame, 0, child_stack_sz); + __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = + (current_pt_regs()->tstate + 1) & TSTATE_CWP; + t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn; + t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg; + return 0; + } + + parent_sf = ((struct sparc_stackf *) regs) - 1; + memcpy(child_trap_frame, parent_sf, child_stack_sz); + if (t->flags & _TIF_32BIT) { + sp &= 0x00000000ffffffffUL; + regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; + } + t->kregs->u_regs[UREG_FP] = sp; + __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = + (regs->tstate + 1) & TSTATE_CWP; + if (sp != regs->u_regs[UREG_FP]) { + unsigned long csp; + + csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); + if (!csp) + return -EFAULT; + t->kregs->u_regs[UREG_FP] = csp; + } + if (t->utraps) + t->utraps[0]++; + + /* Set the return value for the child. */ + t->kregs->u_regs[UREG_I0] = current->pid; + t->kregs->u_regs[UREG_I1] = 1; + + /* Set the second return value for the parent. */ + regs->u_regs[UREG_I1] = 0; + + if (clone_flags & CLONE_SETTLS) + t->kregs->u_regs[UREG_G7] = tls; + + return 0; +} + +/* TIF_MCDPER in thread info flags for current task is updated lazily upon + * a context switch. Update this flag in current task's thread flags + * before dup so the dup'd task will inherit the current TIF_MCDPER flag. 
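+ *
+ * (The refresh reads the %mcdper ancillary state register directly,
+ * emitted as the raw opcode ".word 0x83438000", annotated as
+ * "rd %mcdper, %g1", presumably because the assembler has no
+ * mnemonic for that register.)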
+ */ +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + if (adi_capable()) { + register unsigned long tmp_mcdper; + + __asm__ __volatile__( + ".word 0x83438000\n\t" /* rd %mcdper, %g1 */ + "mov %%g1, %0\n\t" + : "=r" (tmp_mcdper) + : + : "g1"); + if (tmp_mcdper) + set_thread_flag(TIF_MCDPER); + else + clear_thread_flag(TIF_MCDPER); + } + + *dst = *src; + return 0; +} + +unsigned long __get_wchan(struct task_struct *task) +{ + unsigned long pc, fp, bias = 0; + struct thread_info *tp; + struct reg_window *rw; + unsigned long ret = 0; + int count = 0; + + tp = task_thread_info(task); + bias = STACK_BIAS; + fp = task_thread_info(task)->ksp + bias; + + do { + if (!kstack_valid(tp, fp)) + break; + rw = (struct reg_window *) fp; + pc = rw->ins[7]; + if (!in_sched_functions(pc)) { + ret = pc; + goto out; + } + fp = rw->ins[6] + bias; + } while (++count < 16); + +out: + return ret; +} diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h new file mode 100644 index 000000000..26a1cca7c --- /dev/null +++ b/arch/sparc/kernel/prom.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PROM_H +#define __PROM_H + +#include +#include + +void of_console_init(void); + +extern unsigned int prom_early_allocated; + +#endif /* __PROM_H */ diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c new file mode 100644 index 000000000..3df960c13 --- /dev/null +++ b/arch/sparc/kernel/prom_32.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Procedures for creating, accessing and interpreting the device tree. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc32 by David S. Miller davem@davemloft.net + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "prom.h" + +void * __init prom_early_alloc(unsigned long size) +{ + void *ret; + + ret = memblock_alloc(size, SMP_CACHE_BYTES); + if (!ret) + panic("%s: Failed to allocate %lu bytes\n", __func__, size); + + prom_early_allocated += size; + + return ret; +} + +/* The following routines deal with the black magic of fully naming a + * node. + * + * Certain well known named nodes are just the simple name string. + * + * Actual devices have an address specifier appended to the base name + * string, like this "foo@addr". The "addr" can be in any number of + * formats, and the platform plus the type of the node determine the + * format and how it is constructed. + * + * For children of the ROOT node, the naming convention is fixed and + * determined by whether this is a sun4u or sun4v system. + * + * For children of other nodes, it is bus type specific. So + * we walk up the tree until we discover a "device_type" property + * we recognize and we go from there. 
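+ *
+ * As an illustrative example: an SBUS device named "SUNW,fas" in
+ * slot 3 at offset 0x8800000 would get the path component
+ * "SUNW,fas@3,8800000" from sbus_path_component() below.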
+ */ +static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_registers *regs; + struct property *rprop; + + rprop = of_find_property(dp, "reg", NULL); + if (!rprop) + return; + + regs = rprop->value; + sprintf(tmp_buf, "%s@%x,%x", + name, + regs->which_io, regs->phys_addr); +} + +/* "name@slot,offset" */ +static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + sprintf(tmp_buf, "%s@%x,%x", + name, + regs->which_io, + regs->phys_addr); +} + +/* "name@devnum[,func]" */ +static void __init pci_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_pci_registers *regs; + struct property *prop; + unsigned int devfn; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + devfn = (regs->phys_hi >> 8) & 0xff; + if (devfn & 0x07) { + sprintf(tmp_buf, "%s@%x,%x", + name, + devfn >> 3, + devfn & 0x07); + } else { + sprintf(tmp_buf, "%s@%x", + name, + devfn >> 3); + } +} + +/* "name@addrhi,addrlo" */ +static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + sprintf(tmp_buf, "%s@%x,%x", + name, + regs->which_io, regs->phys_addr); +} + +/* "name@irq,addrlo" */ +static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct amba_prom_registers *regs; + unsigned int *intr; + unsigned int reg0; + struct property *prop; + int interrupt = 0; + + /* In order to get a unique ID in the device tree (multiple AMBA devices + * may have the same name) the node number is printed + */ + prop = of_find_property(dp, "reg", NULL); + if (!prop) { + reg0 = (unsigned int)dp->phandle; + } else { + regs = prop->value; + reg0 = regs->phys_addr; + } + + /* Not all cores have Interrupt */ + prop = of_find_property(dp, "interrupts", NULL); + if (!prop) + intr = &interrupt; /* IRQ0 does not exist */ + else + intr = prop->value; + + sprintf(tmp_buf, "%s@%x,%x", name, *intr, reg0); +} + +static void __init __build_path_component(struct device_node *dp, char *tmp_buf) +{ + struct device_node *parent = dp->parent; + + if (parent != NULL) { + if (of_node_is_type(parent, "pci") || + of_node_is_type(parent, "pciex")) + return pci_path_component(dp, tmp_buf); + if (of_node_is_type(parent, "sbus")) + return sbus_path_component(dp, tmp_buf); + if (of_node_is_type(parent, "ebus")) + return ebus_path_component(dp, tmp_buf); + if (of_node_is_type(parent, "ambapp")) + return ambapp_path_component(dp, tmp_buf); + + /* "isa" is handled with platform naming */ + } + + /* Use platform naming convention. 
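+	 * (sparc32 root-level fallback: "name@which_io,phys_addr",
+	 * taken from the node's first "reg" entry)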
*/ + return sparc32_path_component(dp, tmp_buf); +} + +char * __init build_path_component(struct device_node *dp) +{ + const char *name = of_get_property(dp, "name", NULL); + char tmp_buf[64], *n; + + tmp_buf[0] = '\0'; + __build_path_component(dp, tmp_buf); + if (tmp_buf[0] == '\0') + strcpy(tmp_buf, name); + + n = prom_early_alloc(strlen(tmp_buf) + 1); + strcpy(n, tmp_buf); + + return n; +} + +extern void restore_current(void); + +void __init of_console_init(void) +{ + char *msg = "OF stdout device is: %s\n"; + struct device_node *dp; + unsigned long flags; + const char *type; + phandle node; + int skip, tmp, fd; + + of_console_path = prom_early_alloc(256); + + switch (prom_vers) { + case PROM_V0: + skip = 0; + switch (*romvec->pv_stdout) { + case PROMDEV_SCREEN: + type = "display"; + break; + + case PROMDEV_TTYB: + skip = 1; + fallthrough; + + case PROMDEV_TTYA: + type = "serial"; + break; + + default: + prom_printf("Invalid PROM_V0 stdout value %u\n", + *romvec->pv_stdout); + prom_halt(); + } + + tmp = skip; + for_each_node_by_type(dp, type) { + if (!tmp--) + break; + } + if (!dp) { + prom_printf("Cannot find PROM_V0 console node.\n"); + prom_halt(); + } + of_console_device = dp; + + sprintf(of_console_path, "%pOF", dp); + if (!strcmp(type, "serial")) { + strcat(of_console_path, + (skip ? ":b" : ":a")); + } + break; + + default: + case PROM_V2: + case PROM_V3: + fd = *romvec->pv_v2bootargs.fd_stdout; + + spin_lock_irqsave(&prom_lock, flags); + node = (*romvec->pv_v2devops.v2_inst2pkg)(fd); + restore_current(); + spin_unlock_irqrestore(&prom_lock, flags); + + if (!node) { + prom_printf("Cannot resolve stdout node from " + "instance %08x.\n", fd); + prom_halt(); + } + dp = of_find_node_by_phandle(node); + + if (!of_node_is_type(dp, "display") && + !of_node_is_type(dp, "serial")) { + prom_printf("Console device_type is neither display " + "nor serial.\n"); + prom_halt(); + } + + of_console_device = dp; + + if (prom_vers == PROM_V2) { + sprintf(of_console_path, "%pOF", dp); + switch (*romvec->pv_stdout) { + case PROMDEV_TTYA: + strcat(of_console_path, ":a"); + break; + case PROMDEV_TTYB: + strcat(of_console_path, ":b"); + break; + } + } else { + const char *path; + + dp = of_find_node_by_path("/"); + path = of_get_property(dp, "stdout-path", NULL); + if (!path) { + prom_printf("No stdout-path in root node.\n"); + prom_halt(); + } + strcpy(of_console_path, path); + } + break; + } + + of_console_options = strrchr(of_console_path, ':'); + if (of_console_options) { + of_console_options++; + if (*of_console_options == '\0') + of_console_options = NULL; + } + + printk(msg, of_console_path); +} + +void __init of_fill_in_cpu_data(void) +{ +} + +void __init irq_trans_init(struct device_node *dp) +{ +} diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c new file mode 100644 index 000000000..f883a50fa --- /dev/null +++ b/arch/sparc/kernel/prom_64.c @@ -0,0 +1,633 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Procedures for creating, accessing and interpreting the device tree. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc64 by David S. 
Miller davem@davemloft.net + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "prom.h" + +void * __init prom_early_alloc(unsigned long size) +{ + void *ret = memblock_alloc(size, SMP_CACHE_BYTES); + + if (!ret) { + prom_printf("prom_early_alloc(%lu) failed\n", size); + prom_halt(); + } + + prom_early_allocated += size; + + return ret; +} + +/* The following routines deal with the black magic of fully naming a + * node. + * + * Certain well known named nodes are just the simple name string. + * + * Actual devices have an address specifier appended to the base name + * string, like this "foo@addr". The "addr" can be in any number of + * formats, and the platform plus the type of the node determine the + * format and how it is constructed. + * + * For children of the ROOT node, the naming convention is fixed and + * determined by whether this is a sun4u or sun4v system. + * + * For children of other nodes, it is bus type specific. So + * we walk up the tree until we discover a "device_type" property + * we recognize and we go from there. + * + * As an example, the boot device on my workstation has a full path: + * + * /pci@1e,600000/ide@d/disk@0,0:c + */ +static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom64_registers *regs; + struct property *rprop; + u32 high_bits, low_bits, type; + + rprop = of_find_property(dp, "reg", NULL); + if (!rprop) + return; + + regs = rprop->value; + if (!of_node_is_root(dp->parent)) { + sprintf(tmp_buf, "%s@%x,%x", + name, + (unsigned int) (regs->phys_addr >> 32UL), + (unsigned int) (regs->phys_addr & 0xffffffffUL)); + return; + } + + type = regs->phys_addr >> 60UL; + high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL; + low_bits = (regs->phys_addr & 0xffffffffUL); + + if (type == 0 || type == 8) { + const char *prefix = (type == 0) ? 
"m" : "i"; + + if (low_bits) + sprintf(tmp_buf, "%s@%s%x,%x", + name, prefix, + high_bits, low_bits); + else + sprintf(tmp_buf, "%s@%s%x", + name, + prefix, + high_bits); + } else if (type == 12) { + sprintf(tmp_buf, "%s@%x", + name, high_bits); + } +} + +static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom64_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + if (!of_node_is_root(dp->parent)) { + sprintf(tmp_buf, "%s@%x,%x", + name, + (unsigned int) (regs->phys_addr >> 32UL), + (unsigned int) (regs->phys_addr & 0xffffffffUL)); + return; + } + + prop = of_find_property(dp, "upa-portid", NULL); + if (!prop) + prop = of_find_property(dp, "portid", NULL); + if (prop) { + unsigned long mask = 0xffffffffUL; + + if (tlb_type >= cheetah) + mask = 0x7fffff; + + sprintf(tmp_buf, "%s@%x,%x", + name, + *(u32 *)prop->value, + (unsigned int) (regs->phys_addr & mask)); + } +} + +/* "name@slot,offset" */ +static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + sprintf(tmp_buf, "%s@%x,%x", + name, + regs->which_io, + regs->phys_addr); +} + +/* "name@devnum[,func]" */ +static void __init pci_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom_pci_registers *regs; + struct property *prop; + unsigned int devfn; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + devfn = (regs->phys_hi >> 8) & 0xff; + if (devfn & 0x07) { + sprintf(tmp_buf, "%s@%x,%x", + name, + devfn >> 3, + devfn & 0x07); + } else { + sprintf(tmp_buf, "%s@%x", + name, + devfn >> 3); + } +} + +/* "name@UPA_PORTID,offset" */ +static void __init upa_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom64_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + prop = of_find_property(dp, "upa-portid", NULL); + if (!prop) + return; + + sprintf(tmp_buf, "%s@%x,%x", + name, + *(u32 *) prop->value, + (unsigned int) (regs->phys_addr & 0xffffffffUL)); +} + +/* "name@reg" */ +static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct property *prop; + u32 *regs; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + sprintf(tmp_buf, "%s@%x", name, *regs); +} + +/* "name@addrhi,addrlo" */ +static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct linux_prom64_registers *regs; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + sprintf(tmp_buf, "%s@%x,%x", + name, + (unsigned int) (regs->phys_addr >> 32UL), + (unsigned int) (regs->phys_addr & 0xffffffffUL)); +} + +/* "name@bus,addr" */ +static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct property *prop; + u32 *regs; + + prop = 
of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + /* This actually isn't right... should look at the #address-cells + * property of the i2c bus node etc. etc. + */ + sprintf(tmp_buf, "%s@%x,%x", + name, regs[0], regs[1]); +} + +/* "name@reg0[,reg1]" */ +static void __init usb_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct property *prop; + u32 *regs; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + if (prop->length == sizeof(u32) || regs[1] == 1) { + sprintf(tmp_buf, "%s@%x", + name, regs[0]); + } else { + sprintf(tmp_buf, "%s@%x,%x", + name, regs[0], regs[1]); + } +} + +/* "name@reg0reg1[,reg2reg3]" */ +static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf) +{ + const char *name = of_get_property(dp, "name", NULL); + struct property *prop; + u32 *regs; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + + regs = prop->value; + + if (regs[2] || regs[3]) { + sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", + name, regs[0], regs[1], regs[2], regs[3]); + } else { + sprintf(tmp_buf, "%s@%08x%08x", + name, regs[0], regs[1]); + } +} + +static void __init __build_path_component(struct device_node *dp, char *tmp_buf) +{ + struct device_node *parent = dp->parent; + + if (parent != NULL) { + if (of_node_is_type(parent, "pci") || + of_node_is_type(parent, "pciex")) { + pci_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "sbus")) { + sbus_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "upa")) { + upa_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "ebus")) { + ebus_path_component(dp, tmp_buf); + return; + } + if (of_node_name_eq(parent, "usb") || + of_node_name_eq(parent, "hub")) { + usb_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "i2c")) { + i2c_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "firewire")) { + ieee1394_path_component(dp, tmp_buf); + return; + } + if (of_node_is_type(parent, "virtual-devices")) { + vdev_path_component(dp, tmp_buf); + return; + } + /* "isa" is handled with platform naming */ + } + + /* Use platform naming convention. */ + if (tlb_type == hypervisor) { + sun4v_path_component(dp, tmp_buf); + return; + } else { + sun4u_path_component(dp, tmp_buf); + } +} + +char * __init build_path_component(struct device_node *dp) +{ + const char *name = of_get_property(dp, "name", NULL); + char tmp_buf[64], *n; + + tmp_buf[0] = '\0'; + __build_path_component(dp, tmp_buf); + if (tmp_buf[0] == '\0') + strcpy(tmp_buf, name); + + n = prom_early_alloc(strlen(tmp_buf) + 1); + strcpy(n, tmp_buf); + + return n; +} + +static const char *get_mid_prop(void) +{ + return (tlb_type == spitfire ? "upa-portid" : "portid"); +} + +bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread) +{ + const char *mid_prop = get_mid_prop(); + int this_cpu_id; + + /* On hypervisor based platforms we interrogate the 'reg' + * property. On everything else we look for a 'upa-portid', + * 'portid', or 'cpuid' property. 
+ */ + + if (tlb_type == hypervisor) { + struct property *prop = of_find_property(cpun, "reg", NULL); + u32 *regs; + + if (!prop) { + pr_warn("CPU node missing reg property\n"); + return false; + } + regs = prop->value; + this_cpu_id = regs[0] & 0x0fffffff; + } else { + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + + if (this_cpu_id < 0) { + mid_prop = "cpuid"; + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + } + if (this_cpu_id < 0) { + pr_warn("CPU node missing cpu ID property\n"); + return false; + } + } + if (this_cpu_id == cpu) { + if (thread) { + int proc_id = cpu_data(cpu).proc_id; + + /* On sparc64, the cpu thread information is obtained + * either from OBP or the machine description. We've + * actually probed this information already long before + * this interface gets called so instead of interrogating + * both the OF node and the MDESC again, just use what + * we discovered already. + */ + if (proc_id < 0) + proc_id = 0; + *thread = proc_id; + } + return true; + } + return false; +} + +static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg) +{ + struct device_node *dp; + const char *mid_prop; + + mid_prop = get_mid_prop(); + for_each_node_by_type(dp, "cpu") { + int cpuid = of_getintprop_default(dp, mid_prop, -1); + const char *this_mid_prop = mid_prop; + void *ret; + + if (cpuid < 0) { + this_mid_prop = "cpuid"; + cpuid = of_getintprop_default(dp, this_mid_prop, -1); + } + if (cpuid < 0) { + prom_printf("OF: Serious problem, cpu lacks " + "%s property", this_mid_prop); + prom_halt(); + } +#ifdef CONFIG_SMP + if (cpuid >= NR_CPUS) { + printk(KERN_WARNING "Ignoring CPU %d which is " + ">= NR_CPUS (%d)\n", + cpuid, NR_CPUS); + continue; + } +#endif + ret = func(dp, cpuid, arg); + if (ret) + return ret; + } + return NULL; +} + +static void *check_cpu_node(struct device_node *dp, int cpuid, int id) +{ + if (id == cpuid) + return dp; + return NULL; +} + +struct device_node *of_find_node_by_cpuid(int cpuid) +{ + return of_iterate_over_cpus(check_cpu_node, cpuid); +} + +static void *record_one_cpu(struct device_node *dp, int cpuid, int arg) +{ + ncpus_probed++; +#ifdef CONFIG_SMP + set_cpu_present(cpuid, true); + set_cpu_possible(cpuid, true); +#endif + return NULL; +} + +void __init of_populate_present_mask(void) +{ + if (tlb_type == hypervisor) + return; + + ncpus_probed = 0; + of_iterate_over_cpus(record_one_cpu, 0); +} + +static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg) +{ + struct device_node *portid_parent = NULL; + int portid = -1; + + if (of_find_property(dp, "cpuid", NULL)) { + int limit = 2; + + portid_parent = dp; + while (limit--) { + portid_parent = portid_parent->parent; + if (!portid_parent) + break; + portid = of_getintprop_default(portid_parent, + "portid", -1); + if (portid >= 0) + break; + } + } + +#ifndef CONFIG_SMP + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. 
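+	 * So even if the boot cpu's hardware id is, say, 5, its clock
+	 * and cache properties end up recorded at cpu_data(0).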
+ */ + if (cpuid != real_hard_smp_processor_id()) + return NULL; + cpuid = 0; +#endif + + cpu_data(cpuid).clock_tick = + of_getintprop_default(dp, "clock-frequency", 0); + + if (portid_parent) { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "l1-dcache-size", + 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "l1-dcache-line-size", + 32); + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "l1-icache-size", + 8 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "l1-icache-line-size", + 32); + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "l2-cache-size", 0); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "l2-cache-line-size", 0); + if (!cpu_data(cpuid).ecache_size || + !cpu_data(cpuid).ecache_line_size) { + cpu_data(cpuid).ecache_size = + of_getintprop_default(portid_parent, + "l2-cache-size", + (4 * 1024 * 1024)); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(portid_parent, + "l2-cache-line-size", 64); + } + + cpu_data(cpuid).core_id = portid + 1; + cpu_data(cpuid).proc_id = portid; + } else { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "dcache-size", 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "dcache-line-size", 32); + + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "icache-size", 16 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "icache-line-size", 32); + + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "ecache-size", + (4 * 1024 * 1024)); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "ecache-line-size", 64); + + cpu_data(cpuid).core_id = 0; + cpu_data(cpuid).proc_id = -1; + } + + return NULL; +} + +void __init of_fill_in_cpu_data(void) +{ + if (tlb_type == hypervisor) + return; + + of_iterate_over_cpus(fill_in_one_cpu, 0); + + smp_fill_in_sib_core_maps(); +} + +void __init of_console_init(void) +{ + char *msg = "OF stdout device is: %s\n"; + struct device_node *dp; + phandle node; + + of_console_path = prom_early_alloc(256); + if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) { + prom_printf("Cannot obtain path of stdout.\n"); + prom_halt(); + } + of_console_options = strrchr(of_console_path, ':'); + if (of_console_options) { + of_console_options++; + if (*of_console_options == '\0') + of_console_options = NULL; + } + + node = prom_inst2pkg(prom_stdout); + if (!node) { + prom_printf("Cannot resolve stdout node from " + "instance %08x.\n", prom_stdout); + prom_halt(); + } + + dp = of_find_node_by_phandle(node); + + if (!of_node_is_type(dp, "display") && !of_node_is_type(dp, "serial")) { + prom_printf("Console device_type is neither display " + "nor serial.\n"); + prom_halt(); + } + + of_console_device = dp; + + printk(msg, of_console_path); +} diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c new file mode 100644 index 000000000..c9ec70888 --- /dev/null +++ b/arch/sparc/kernel/prom_common.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* prom_common.c: OF device tree support common code. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc by David S. 
Miller davem@davemloft.net + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "prom.h" + +struct device_node *of_console_device; +EXPORT_SYMBOL(of_console_device); + +char *of_console_path; +EXPORT_SYMBOL(of_console_path); + +char *of_console_options; +EXPORT_SYMBOL(of_console_options); + +int of_getintprop_default(struct device_node *np, const char *name, int def) +{ + struct property *prop; + int len; + + prop = of_find_property(np, name, &len); + if (!prop || len != 4) + return def; + + return *(int *) prop->value; +} +EXPORT_SYMBOL(of_getintprop_default); + +DEFINE_MUTEX(of_set_property_mutex); +EXPORT_SYMBOL(of_set_property_mutex); + +int of_set_property(struct device_node *dp, const char *name, void *val, int len) +{ + struct property **prevp; + unsigned long flags; + void *new_val; + int err; + + new_val = kmemdup(val, len, GFP_KERNEL); + if (!new_val) + return -ENOMEM; + + err = -ENODEV; + + mutex_lock(&of_set_property_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + prevp = &dp->properties; + while (*prevp) { + struct property *prop = *prevp; + + if (!strcasecmp(prop->name, name)) { + void *old_val = prop->value; + int ret; + + ret = prom_setprop(dp->phandle, name, val, len); + + err = -EINVAL; + if (ret >= 0) { + prop->value = new_val; + prop->length = len; + + if (OF_IS_DYNAMIC(prop)) + kfree(old_val); + + OF_MARK_DYNAMIC(prop); + + err = 0; + } + break; + } + prevp = &(*prevp)->next; + } + raw_spin_unlock_irqrestore(&devtree_lock, flags); + mutex_unlock(&of_set_property_mutex); + + /* XXX Upate procfs if necessary... */ + + return err; +} +EXPORT_SYMBOL(of_set_property); + +int of_find_in_proplist(const char *list, const char *match, int len) +{ + while (len > 0) { + int l; + + if (!strcmp(list, match)) + return 1; + l = strlen(list) + 1; + list += l; + len -= l; + } + return 0; +} +EXPORT_SYMBOL(of_find_in_proplist); + +/* + * SPARC32 and SPARC64's prom_nextprop() do things differently + * here, despite sharing the same interface. SPARC32 doesn't fill in 'buf', + * returning NULL on an error. SPARC64 fills in 'buf', but sets it to an + * empty string upon error. + */ +static int __init handle_nextprop_quirks(char *buf, const char *name) +{ + if (!name || strlen(name) == 0) + return -1; + +#ifdef CONFIG_SPARC32 + strcpy(buf, name); +#endif + return 0; +} + +static int __init prom_common_nextprop(phandle node, char *prev, char *buf) +{ + const char *name; + + buf[0] = '\0'; + name = prom_nextprop(node, prev, buf); + return handle_nextprop_quirks(buf, name); +} + +unsigned int prom_early_allocated __initdata; + +static struct of_pdt_ops prom_sparc_ops __initdata = { + .nextprop = prom_common_nextprop, + .getproplen = prom_getproplen, + .getproperty = prom_getproperty, + .getchild = prom_getchild, + .getsibling = prom_getsibling, +}; + +void __init prom_build_devicetree(void) +{ + of_pdt_build_devicetree(prom_root_node, &prom_sparc_ops); + of_console_init(); + + pr_info("PROM: Built device tree with %u bytes of memory.\n", + prom_early_allocated); +} diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c new file mode 100644 index 000000000..28aff1c52 --- /dev/null +++ b/arch/sparc/kernel/prom_irqtrans.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "prom.h" + +#ifdef CONFIG_PCI +/* PSYCHO interrupt mapping support. 
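+ *
+ * The 6-bit INO encodes the source: bit 5 selects onboard devices,
+ * otherwise bit 4 picks PCI bus A or B and bits 3:2 the slot. As a
+ * worked example, ino 0x0d decodes to bus A, slot 3, so its mapping
+ * register sits at PSYCHO_IMAP_A_SLOT0 + 3 * 8 = 0x0c18.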
*/ +#define PSYCHO_IMAP_A_SLOT0 0x0c00UL +#define PSYCHO_IMAP_B_SLOT0 0x0c20UL +static unsigned long psycho_pcislot_imap_offset(unsigned long ino) +{ + unsigned int bus = (ino & 0x10) >> 4; + unsigned int slot = (ino & 0x0c) >> 2; + + if (bus == 0) + return PSYCHO_IMAP_A_SLOT0 + (slot * 8); + else + return PSYCHO_IMAP_B_SLOT0 + (slot * 8); +} + +#define PSYCHO_OBIO_IMAP_BASE 0x1000UL + +#define PSYCHO_ONBOARD_IRQ_BASE 0x20 +#define psycho_onboard_imap_offset(__ino) \ + (PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3)) + +#define PSYCHO_ICLR_A_SLOT0 0x1400UL +#define PSYCHO_ICLR_SCSI 0x1800UL + +#define psycho_iclr_offset(ino) \ + ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \ + (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3))) + +static unsigned int psycho_irq_build(struct device_node *dp, + unsigned int ino, + void *_data) +{ + unsigned long controller_regs = (unsigned long) _data; + unsigned long imap, iclr; + unsigned long imap_off, iclr_off; + int inofixup = 0; + + ino &= 0x3f; + if (ino < PSYCHO_ONBOARD_IRQ_BASE) { + /* PCI slot */ + imap_off = psycho_pcislot_imap_offset(ino); + } else { + /* Onboard device */ + imap_off = psycho_onboard_imap_offset(ino); + } + + /* Now build the IRQ bucket. */ + imap = controller_regs + imap_off; + + iclr_off = psycho_iclr_offset(ino); + iclr = controller_regs + iclr_off; + + if ((ino & 0x20) == 0) + inofixup = ino & 0x03; + + return build_irq(inofixup, iclr, imap); +} + +static void __init psycho_irq_trans_init(struct device_node *dp) +{ + const struct linux_prom64_registers *regs; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = psycho_irq_build; + + regs = of_get_property(dp, "reg", NULL); + dp->irq_trans->data = (void *) regs[2].phys_addr; +} + +#define sabre_read(__reg) \ +({ u64 __ret; \ + __asm__ __volatile__("ldxa [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ + : "memory"); \ + __ret; \ +}) + +struct sabre_irq_data { + unsigned long controller_regs; + unsigned int pci_first_busno; +}; +#define SABRE_CONFIGSPACE 0x001000000UL +#define SABRE_WRSYNC 0x1c20UL + +#define SABRE_CONFIG_BASE(CONFIG_SPACE) \ + (CONFIG_SPACE | (1UL << 24)) +#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \ + (((unsigned long)(BUS) << 16) | \ + ((unsigned long)(DEVFN) << 8) | \ + ((unsigned long)(REG))) + +/* When a device lives behind a bridge deeper in the PCI bus topology + * than APB, a special sequence must run to make sure all pending DMA + * transfers at the time of IRQ delivery are visible in the coherency + * domain by the cpu. This sequence is to perform a read on the far + * side of the non-APB bridge, then perform a read of Sabre's DMA + * write-sync register. 
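+ *
+ * The handler below implements that ordering: a config-space read
+ * addressed at the device's bus/devfn (the far side of the bridge),
+ * bracketed by #Sync membars, followed by a read of SABRE_WRSYNC.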
+ */ +static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2) +{ + unsigned int phys_hi = (unsigned int) (unsigned long) _arg1; + struct sabre_irq_data *irq_data = _arg2; + unsigned long controller_regs = irq_data->controller_regs; + unsigned long sync_reg = controller_regs + SABRE_WRSYNC; + unsigned long config_space = controller_regs + SABRE_CONFIGSPACE; + unsigned int bus, devfn; + u16 _unused; + + config_space = SABRE_CONFIG_BASE(config_space); + + bus = (phys_hi >> 16) & 0xff; + devfn = (phys_hi >> 8) & 0xff; + + config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00); + + __asm__ __volatile__("membar #Sync\n\t" + "lduha [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (_unused) + : "r" ((u16 *) config_space), + "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + sabre_read(sync_reg); +} + +#define SABRE_IMAP_A_SLOT0 0x0c00UL +#define SABRE_IMAP_B_SLOT0 0x0c20UL +#define SABRE_ICLR_A_SLOT0 0x1400UL +#define SABRE_ICLR_B_SLOT0 0x1480UL +#define SABRE_ICLR_SCSI 0x1800UL +#define SABRE_ICLR_ETH 0x1808UL +#define SABRE_ICLR_BPP 0x1810UL +#define SABRE_ICLR_AU_REC 0x1818UL +#define SABRE_ICLR_AU_PLAY 0x1820UL +#define SABRE_ICLR_PFAIL 0x1828UL +#define SABRE_ICLR_KMS 0x1830UL +#define SABRE_ICLR_FLPY 0x1838UL +#define SABRE_ICLR_SHW 0x1840UL +#define SABRE_ICLR_KBD 0x1848UL +#define SABRE_ICLR_MS 0x1850UL +#define SABRE_ICLR_SER 0x1858UL +#define SABRE_ICLR_UE 0x1870UL +#define SABRE_ICLR_CE 0x1878UL +#define SABRE_ICLR_PCIERR 0x1880UL + +static unsigned long sabre_pcislot_imap_offset(unsigned long ino) +{ + unsigned int bus = (ino & 0x10) >> 4; + unsigned int slot = (ino & 0x0c) >> 2; + + if (bus == 0) + return SABRE_IMAP_A_SLOT0 + (slot * 8); + else + return SABRE_IMAP_B_SLOT0 + (slot * 8); +} + +#define SABRE_OBIO_IMAP_BASE 0x1000UL +#define SABRE_ONBOARD_IRQ_BASE 0x20 +#define sabre_onboard_imap_offset(__ino) \ + (SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3)) + +#define sabre_iclr_offset(ino) \ + ((ino & 0x20) ? 
(SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \ + (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3))) + +static int sabre_device_needs_wsync(struct device_node *dp) +{ + struct device_node *parent = dp->parent; + const char *parent_model, *parent_compat; + + /* This traversal up towards the root is meant to + * handle two cases: + * + * 1) non-PCI bus sitting under PCI, such as 'ebus' + * 2) the PCI controller interrupts themselves, which + * will use the sabre_irq_build but do not need + * the DMA synchronization handling + */ + while (parent) { + if (of_node_is_type(parent, "pci")) + break; + parent = parent->parent; + } + + if (!parent) + return 0; + + parent_model = of_get_property(parent, + "model", NULL); + if (parent_model && + (!strcmp(parent_model, "SUNW,sabre") || + !strcmp(parent_model, "SUNW,simba"))) + return 0; + + parent_compat = of_get_property(parent, + "compatible", NULL); + if (parent_compat && + (!strcmp(parent_compat, "pci108e,a000") || + !strcmp(parent_compat, "pci108e,a001"))) + return 0; + + return 1; +} + +static unsigned int sabre_irq_build(struct device_node *dp, + unsigned int ino, + void *_data) +{ + struct sabre_irq_data *irq_data = _data; + unsigned long controller_regs = irq_data->controller_regs; + const struct linux_prom_pci_registers *regs; + unsigned long imap, iclr; + unsigned long imap_off, iclr_off; + int inofixup = 0; + int irq; + + ino &= 0x3f; + if (ino < SABRE_ONBOARD_IRQ_BASE) { + /* PCI slot */ + imap_off = sabre_pcislot_imap_offset(ino); + } else { + /* onboard device */ + imap_off = sabre_onboard_imap_offset(ino); + } + + /* Now build the IRQ bucket. */ + imap = controller_regs + imap_off; + + iclr_off = sabre_iclr_offset(ino); + iclr = controller_regs + iclr_off; + + if ((ino & 0x20) == 0) + inofixup = ino & 0x03; + + irq = build_irq(inofixup, iclr, imap); + + /* If the parent device is a PCI<->PCI bridge other than + * APB, we have to install a pre-handler to ensure that + * all pending DMA is drained before the interrupt handler + * is run. + */ + regs = of_get_property(dp, "reg", NULL); + if (regs && sabre_device_needs_wsync(dp)) { + irq_install_pre_handler(irq, + sabre_wsync_handler, + (void *) (long) regs->phys_hi, + (void *) irq_data); + } + + return irq; +} + +static void __init sabre_irq_trans_init(struct device_node *dp) +{ + const struct linux_prom64_registers *regs; + struct sabre_irq_data *irq_data; + const u32 *busrange; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = sabre_irq_build; + + irq_data = prom_early_alloc(sizeof(struct sabre_irq_data)); + + regs = of_get_property(dp, "reg", NULL); + irq_data->controller_regs = regs[0].phys_addr; + + busrange = of_get_property(dp, "bus-range", NULL); + irq_data->pci_first_busno = busrange[0]; + + dp->irq_trans->data = irq_data; +} + +/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the + * imap/iclr registers are per-PBM. 
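+ *
+ * That makes the offset math trivial: each INO indexes a linear
+ * array of 8-byte registers at SCHIZO_IMAP_BASE / SCHIZO_ICLR_BASE,
+ * with none of the bus/slot decoding Psycho needs.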
+ */ +#define SCHIZO_IMAP_BASE 0x1000UL +#define SCHIZO_ICLR_BASE 0x1400UL + +static unsigned long schizo_imap_offset(unsigned long ino) +{ + return SCHIZO_IMAP_BASE + (ino * 8UL); +} + +static unsigned long schizo_iclr_offset(unsigned long ino) +{ + return SCHIZO_ICLR_BASE + (ino * 8UL); +} + +static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs, + unsigned int ino) +{ + + return pbm_regs + schizo_iclr_offset(ino); +} + +static unsigned long schizo_ino_to_imap(unsigned long pbm_regs, + unsigned int ino) +{ + return pbm_regs + schizo_imap_offset(ino); +} + +#define schizo_read(__reg) \ +({ u64 __ret; \ + __asm__ __volatile__("ldxa [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ + : "memory"); \ + __ret; \ +}) +#define schizo_write(__reg, __val) \ + __asm__ __volatile__("stxa %0, [%1] %2" \ + : /* no outputs */ \ + : "r" (__val), "r" (__reg), \ + "i" (ASI_PHYS_BYPASS_EC_E) \ + : "memory") + +static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2) +{ + unsigned long sync_reg = (unsigned long) _arg2; + u64 mask = 1UL << (ino & IMAP_INO); + u64 val; + int limit; + + schizo_write(sync_reg, mask); + + limit = 100000; + val = 0; + while (--limit) { + val = schizo_read(sync_reg); + if (!(val & mask)) + break; + } + if (limit <= 0) { + printk("tomatillo_wsync_handler: DMA won't sync [%llx:%llx]\n", + val, mask); + } + + if (_arg1) { + static unsigned char cacheline[64] + __attribute__ ((aligned (64))); + + __asm__ __volatile__("rd %%fprs, %0\n\t" + "or %0, %4, %1\n\t" + "wr %1, 0x0, %%fprs\n\t" + "stda %%f0, [%5] %6\n\t" + "wr %0, 0x0, %%fprs\n\t" + "membar #Sync" + : "=&r" (mask), "=&r" (val) + : "0" (mask), "1" (val), + "i" (FPRS_FEF), "r" (&cacheline[0]), + "i" (ASI_BLK_COMMIT_P)); + } +} + +struct schizo_irq_data { + unsigned long pbm_regs; + unsigned long sync_reg; + u32 portid; + int chip_version; +}; + +static unsigned int schizo_irq_build(struct device_node *dp, + unsigned int ino, + void *_data) +{ + struct schizo_irq_data *irq_data = _data; + unsigned long pbm_regs = irq_data->pbm_regs; + unsigned long imap, iclr; + int ign_fixup; + int irq; + int is_tomatillo; + + ino &= 0x3f; + + /* Now build the IRQ bucket. */ + imap = schizo_ino_to_imap(pbm_regs, ino); + iclr = schizo_ino_to_iclr(pbm_regs, ino); + + /* On Schizo, no inofixup occurs. This is because each + * INO has it's own IMAP register. On Psycho and Sabre + * there is only one IMAP register for each PCI slot even + * though four different INOs can be generated by each + * PCI slot. + * + * But, for JBUS variants (essentially, Tomatillo), we have + * to fixup the lowest bit of the interrupt group number. + */ + ign_fixup = 0; + + is_tomatillo = (irq_data->sync_reg != 0UL); + + if (is_tomatillo) { + if (irq_data->portid & 1) + ign_fixup = (1 << 6); + } + + irq = build_irq(ign_fixup, iclr, imap); + + if (is_tomatillo) { + irq_install_pre_handler(irq, + tomatillo_wsync_handler, + ((irq_data->chip_version <= 4) ? 
+ (void *) 1 : (void *) 0), + (void *) irq_data->sync_reg); + } + + return irq; +} + +static void __init __schizo_irq_trans_init(struct device_node *dp, + int is_tomatillo) +{ + const struct linux_prom64_registers *regs; + struct schizo_irq_data *irq_data; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = schizo_irq_build; + + irq_data = prom_early_alloc(sizeof(struct schizo_irq_data)); + + regs = of_get_property(dp, "reg", NULL); + dp->irq_trans->data = irq_data; + + irq_data->pbm_regs = regs[0].phys_addr; + if (is_tomatillo) + irq_data->sync_reg = regs[3].phys_addr + 0x1a18UL; + else + irq_data->sync_reg = 0UL; + irq_data->portid = of_getintprop_default(dp, "portid", 0); + irq_data->chip_version = of_getintprop_default(dp, "version#", 0); +} + +static void __init schizo_irq_trans_init(struct device_node *dp) +{ + __schizo_irq_trans_init(dp, 0); +} + +static void __init tomatillo_irq_trans_init(struct device_node *dp) +{ + __schizo_irq_trans_init(dp, 1); +} + +static unsigned int pci_sun4v_irq_build(struct device_node *dp, + unsigned int devino, + void *_data) +{ + u32 devhandle = (u32) (unsigned long) _data; + + return sun4v_build_irq(devhandle, devino); +} + +static void __init pci_sun4v_irq_trans_init(struct device_node *dp) +{ + const struct linux_prom64_registers *regs; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = pci_sun4v_irq_build; + + regs = of_get_property(dp, "reg", NULL); + dp->irq_trans->data = (void *) (unsigned long) + ((regs->phys_addr >> 32UL) & 0x0fffffff); +} + +struct fire_irq_data { + unsigned long pbm_regs; + u32 portid; +}; + +#define FIRE_IMAP_BASE 0x001000 +#define FIRE_ICLR_BASE 0x001400 + +static unsigned long fire_imap_offset(unsigned long ino) +{ + return FIRE_IMAP_BASE + (ino * 8UL); +} + +static unsigned long fire_iclr_offset(unsigned long ino) +{ + return FIRE_ICLR_BASE + (ino * 8UL); +} + +static unsigned long fire_ino_to_iclr(unsigned long pbm_regs, + unsigned int ino) +{ + return pbm_regs + fire_iclr_offset(ino); +} + +static unsigned long fire_ino_to_imap(unsigned long pbm_regs, + unsigned int ino) +{ + return pbm_regs + fire_imap_offset(ino); +} + +static unsigned int fire_irq_build(struct device_node *dp, + unsigned int ino, + void *_data) +{ + struct fire_irq_data *irq_data = _data; + unsigned long pbm_regs = irq_data->pbm_regs; + unsigned long imap, iclr; + unsigned long int_ctrlr; + + ino &= 0x3f; + + /* Now build the IRQ bucket. */ + imap = fire_ino_to_imap(pbm_regs, ino); + iclr = fire_ino_to_iclr(pbm_regs, ino); + + /* Set the interrupt controller number. */ + int_ctrlr = 1 << 6; + upa_writeq(int_ctrlr, imap); + + /* The interrupt map registers do not have an INO field + * like other chips do. They return zero in the INO + * field, and the interrupt controller number is controlled + * in bits 6 to 9. So in order for build_irq() to get + * the INO right we pass it in as part of the fixup + * which will get added to the map register zero value + * read by build_irq(). 
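+	 * For example, with portid 1 and ino 0x15 the fixup passed to
+	 * build_irq() below is ((1 << 6) | 0x15) - 0x40 = 0x15; adding
+	 * back the 0x40 controller number that build_irq() reads from
+	 * the IMAP yields 0x55, i.e. (portid << 6) | ino.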
+ */ + ino |= (irq_data->portid << 6); + ino -= int_ctrlr; + return build_irq(ino, iclr, imap); +} + +static void __init fire_irq_trans_init(struct device_node *dp) +{ + const struct linux_prom64_registers *regs; + struct fire_irq_data *irq_data; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = fire_irq_build; + + irq_data = prom_early_alloc(sizeof(struct fire_irq_data)); + + regs = of_get_property(dp, "reg", NULL); + dp->irq_trans->data = irq_data; + + irq_data->pbm_regs = regs[0].phys_addr; + irq_data->portid = of_getintprop_default(dp, "portid", 0); +} +#endif /* CONFIG_PCI */ + +#ifdef CONFIG_SBUS +/* INO number to IMAP register offset for SYSIO external IRQ's. + * This should conform to both Sunfire/Wildfire server and Fusion + * desktop designs. + */ +#define SYSIO_IMAP_SLOT0 0x2c00UL +#define SYSIO_IMAP_SLOT1 0x2c08UL +#define SYSIO_IMAP_SLOT2 0x2c10UL +#define SYSIO_IMAP_SLOT3 0x2c18UL +#define SYSIO_IMAP_SCSI 0x3000UL +#define SYSIO_IMAP_ETH 0x3008UL +#define SYSIO_IMAP_BPP 0x3010UL +#define SYSIO_IMAP_AUDIO 0x3018UL +#define SYSIO_IMAP_PFAIL 0x3020UL +#define SYSIO_IMAP_KMS 0x3028UL +#define SYSIO_IMAP_FLPY 0x3030UL +#define SYSIO_IMAP_SHW 0x3038UL +#define SYSIO_IMAP_KBD 0x3040UL +#define SYSIO_IMAP_MS 0x3048UL +#define SYSIO_IMAP_SER 0x3050UL +#define SYSIO_IMAP_TIM0 0x3060UL +#define SYSIO_IMAP_TIM1 0x3068UL +#define SYSIO_IMAP_UE 0x3070UL +#define SYSIO_IMAP_CE 0x3078UL +#define SYSIO_IMAP_SBERR 0x3080UL +#define SYSIO_IMAP_PMGMT 0x3088UL +#define SYSIO_IMAP_GFX 0x3090UL +#define SYSIO_IMAP_EUPA 0x3098UL + +#define bogon ((unsigned long) -1) +static unsigned long sysio_irq_offsets[] = { + /* SBUS Slot 0 --> 3, level 1 --> 7 */ + SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, + SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, + SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, + SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, + SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, + SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, + SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, + SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, + + /* Onboard devices (not relevant/used on SunFire). */ + SYSIO_IMAP_SCSI, + SYSIO_IMAP_ETH, + SYSIO_IMAP_BPP, + bogon, + SYSIO_IMAP_AUDIO, + SYSIO_IMAP_PFAIL, + bogon, + bogon, + SYSIO_IMAP_KMS, + SYSIO_IMAP_FLPY, + SYSIO_IMAP_SHW, + SYSIO_IMAP_KBD, + SYSIO_IMAP_MS, + SYSIO_IMAP_SER, + bogon, + bogon, + SYSIO_IMAP_TIM0, + SYSIO_IMAP_TIM1, + bogon, + bogon, + SYSIO_IMAP_UE, + SYSIO_IMAP_CE, + SYSIO_IMAP_SBERR, + SYSIO_IMAP_PMGMT, + SYSIO_IMAP_GFX, + SYSIO_IMAP_EUPA, +}; + +#undef bogon + +#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets) + +/* Convert Interrupt Mapping register pointer to associated + * Interrupt Clear register pointer, SYSIO specific version. 
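+ * The ICLR block simply sits 0x800 bytes above the IMAP block
+ * (SYSIO_ICLR_UNUSED0 0x3400 - SYSIO_IMAP_SLOT0 0x2c00), so the
+ * helper below just adds that constant difference to the IMAP
+ * address.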
+ */
+#define SYSIO_ICLR_UNUSED0	0x3400UL
+#define SYSIO_ICLR_SLOT0	0x3408UL
+#define SYSIO_ICLR_SLOT1	0x3448UL
+#define SYSIO_ICLR_SLOT2	0x3488UL
+#define SYSIO_ICLR_SLOT3	0x34c8UL
+static unsigned long sysio_imap_to_iclr(unsigned long imap)
+{
+	unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
+	return imap + diff;
+}
+
+static unsigned int sbus_of_build_irq(struct device_node *dp,
+				      unsigned int ino,
+				      void *_data)
+{
+	unsigned long reg_base = (unsigned long) _data;
+	const struct linux_prom_registers *regs;
+	unsigned long imap, iclr;
+	int sbus_slot = 0;
+	int sbus_level = 0;
+
+	ino &= 0x3f;
+
+	regs = of_get_property(dp, "reg", NULL);
+	if (regs)
+		sbus_slot = regs->which_io;
+
+	if (ino < 0x20)
+		ino += (sbus_slot * 8);
+
+	imap = sysio_irq_offsets[ino];
+	if (imap == ((unsigned long)-1)) {
+		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
+			    ino);
+		prom_halt();
+	}
+	imap += reg_base;
+
+	/* SYSIO inconsistency.  For external SLOTS, we have to select
+	 * the right ICLR register based upon the lower SBUS irq level
+	 * bits.
+	 */
+	if (ino >= 0x20) {
+		iclr = sysio_imap_to_iclr(imap);
+	} else {
+		sbus_level = ino & 0x7;
+
+		switch(sbus_slot) {
+		case 0:
+			iclr = reg_base + SYSIO_ICLR_SLOT0;
+			break;
+		case 1:
+			iclr = reg_base + SYSIO_ICLR_SLOT1;
+			break;
+		case 2:
+			iclr = reg_base + SYSIO_ICLR_SLOT2;
+			break;
+		default:
+		case 3:
+			iclr = reg_base + SYSIO_ICLR_SLOT3;
+			break;
+		}
+
+		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
+	}
+	return build_irq(sbus_level, iclr, imap);
+}
+
+static void __init sbus_irq_trans_init(struct device_node *dp)
+{
+	const struct linux_prom64_registers *regs;
+
+	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
+	dp->irq_trans->irq_build = sbus_of_build_irq;
+
+	regs = of_get_property(dp, "reg", NULL);
+	dp->irq_trans->data = (void *) (unsigned long) regs->phys_addr;
+}
+#endif /* CONFIG_SBUS */
+
+
+static unsigned int central_build_irq(struct device_node *dp,
+				      unsigned int ino,
+				      void *_data)
+{
+	struct device_node *central_dp = _data;
+	struct platform_device *central_op = of_find_device_by_node(central_dp);
+	struct resource *res;
+	unsigned long imap, iclr;
+	u32 tmp;
+
+	if (of_node_name_eq(dp, "eeprom")) {
+		res = &central_op->resource[5];
+	} else if (of_node_name_eq(dp, "zs")) {
+		res = &central_op->resource[4];
+	} else if (of_node_name_eq(dp, "clock-board")) {
+		res = &central_op->resource[3];
+	} else {
+		return ino;
+	}
+
+	imap = res->start + 0x00UL;
+	iclr = res->start + 0x10UL;
+
+	/* Set the INO state to idle, and disable.
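+	 * Writing zero to the ICLR returns the INO to the idle state;
+	 * clearing bit 31 of the IMAP keeps the interrupt source
+	 * disabled until it is explicitly re-enabled.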
*/ + upa_writel(0, iclr); + upa_readl(iclr); + + tmp = upa_readl(imap); + tmp &= ~0x80000000; + upa_writel(tmp, imap); + + return build_irq(0, iclr, imap); +} + +static void __init central_irq_trans_init(struct device_node *dp) +{ + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = central_build_irq; + + dp->irq_trans->data = dp; +} + +struct irq_trans { + const char *name; + void (*init)(struct device_node *); +}; + +#ifdef CONFIG_PCI +static struct irq_trans __initdata pci_irq_trans_table[] = { + { "SUNW,sabre", sabre_irq_trans_init }, + { "pci108e,a000", sabre_irq_trans_init }, + { "pci108e,a001", sabre_irq_trans_init }, + { "SUNW,psycho", psycho_irq_trans_init }, + { "pci108e,8000", psycho_irq_trans_init }, + { "SUNW,schizo", schizo_irq_trans_init }, + { "pci108e,8001", schizo_irq_trans_init }, + { "SUNW,schizo+", schizo_irq_trans_init }, + { "pci108e,8002", schizo_irq_trans_init }, + { "SUNW,tomatillo", tomatillo_irq_trans_init }, + { "pci108e,a801", tomatillo_irq_trans_init }, + { "SUNW,sun4v-pci", pci_sun4v_irq_trans_init }, + { "pciex108e,80f0", fire_irq_trans_init }, +}; +#endif + +static unsigned int sun4v_vdev_irq_build(struct device_node *dp, + unsigned int devino, + void *_data) +{ + u32 devhandle = (u32) (unsigned long) _data; + + return sun4v_build_irq(devhandle, devino); +} + +static void __init sun4v_vdev_irq_trans_init(struct device_node *dp) +{ + const struct linux_prom64_registers *regs; + + dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); + dp->irq_trans->irq_build = sun4v_vdev_irq_build; + + regs = of_get_property(dp, "reg", NULL); + dp->irq_trans->data = (void *) (unsigned long) + ((regs->phys_addr >> 32UL) & 0x0fffffff); +} + +void __init irq_trans_init(struct device_node *dp) +{ +#ifdef CONFIG_PCI + const char *model; + int i; +#endif + +#ifdef CONFIG_PCI + model = of_get_property(dp, "model", NULL); + if (!model) + model = of_get_property(dp, "compatible", NULL); + if (model) { + for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) { + struct irq_trans *t = &pci_irq_trans_table[i]; + + if (!strcmp(model, t->name)) { + t->init(dp); + return; + } + } + } +#endif +#ifdef CONFIG_SBUS + if (of_node_name_eq(dp, "sbus") || + of_node_name_eq(dp, "sbi")) { + sbus_irq_trans_init(dp); + return; + } +#endif + if (of_node_name_eq(dp, "fhc") && + of_node_name_eq(dp->parent, "central")) { + central_irq_trans_init(dp); + return; + } + if (of_node_name_eq(dp, "virtual-devices") || + of_node_name_eq(dp, "niu")) { + sun4v_vdev_irq_trans_init(dp); + return; + } +} diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c new file mode 100644 index 000000000..e90bcb6ba --- /dev/null +++ b/arch/sparc/kernel/psycho_common.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* psycho_common.c: Code common to PSYCHO and derivative PCI controllers. + * + * Copyright (C) 2008 David S. 
Miller <davem@davemloft.net>
+ */
+#include
+#include
+#include
+
+#include
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+#include "psycho_common.h"
+
+#define PSYCHO_STRBUF_CTRL_DENAB	0x0000000000000002ULL
+#define PSYCHO_STCERR_WRITE		0x0000000000000002ULL
+#define PSYCHO_STCERR_READ		0x0000000000000001ULL
+#define PSYCHO_STCTAG_PPN		0x0fffffff00000000ULL
+#define PSYCHO_STCTAG_VPN		0x00000000ffffe000ULL
+#define PSYCHO_STCTAG_VALID		0x0000000000000002ULL
+#define PSYCHO_STCTAG_WRITE		0x0000000000000001ULL
+#define PSYCHO_STCLINE_LINDX		0x0000000001e00000ULL
+#define PSYCHO_STCLINE_SPTR		0x00000000001f8000ULL
+#define PSYCHO_STCLINE_LADDR		0x0000000000007f00ULL
+#define PSYCHO_STCLINE_EPTR		0x00000000000000fcULL
+#define PSYCHO_STCLINE_VALID		0x0000000000000002ULL
+#define PSYCHO_STCLINE_FOFN		0x0000000000000001ULL
+
+static DEFINE_SPINLOCK(stc_buf_lock);
+static unsigned long stc_error_buf[128];
+static unsigned long stc_tag_buf[16];
+static unsigned long stc_line_buf[16];
+
+static void psycho_check_stc_error(struct pci_pbm_info *pbm)
+{
+	unsigned long err_base, tag_base, line_base;
+	struct strbuf *strbuf = &pbm->stc;
+	u64 control;
+	int i;
+
+	if (!strbuf->strbuf_control)
+		return;
+
+	err_base = strbuf->strbuf_err_stat;
+	tag_base = strbuf->strbuf_tag_diag;
+	line_base = strbuf->strbuf_line_diag;
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the streaming
+	 * buffer into diagnostic mode to probe its tags and error
+	 * status, we _must_ clear all of the line tag valid bits
+	 * before re-enabling the streaming buffer.  If any dirty data
+	 * lives in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach main
+	 * memory.
+	 */
+	control = upa_readq(strbuf->strbuf_control);
+	upa_writeq(control | PSYCHO_STRBUF_CTRL_DENAB, strbuf->strbuf_control);
+	for (i = 0; i < 128; i++) {
+		u64 val;
+
+		val = upa_readq(err_base + (i * 8UL));
+		upa_writeq(0UL, err_base + (i * 8UL));
+		stc_error_buf[i] = val;
+	}
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
+		stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
+		upa_writeq(0UL, tag_base + (i * 8UL));
+		upa_writeq(0UL, line_base + (i * 8UL));
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	upa_writeq(control, strbuf->strbuf_control);
+
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			u64 errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk(KERN_ERR "%s: STC_ERR(%d)[wr(%d)"
+				       "rd(%d)]\n",
+				       pbm->name,
+				       j,
+				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
+				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			u64 tagval = stc_tag_buf[i];
+			u64 lineval = stc_line_buf[i];
+			printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016llx)VA(%08llx)"
+			       "V(%d)W(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
+			       (tagval & PSYCHO_STCTAG_VPN),
+			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
+			printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%llx)SP(%llx)"
+			       "LADDR(%llx)EP(%llx)V(%d)FOFN(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
+			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
+			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
+			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
+			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & PSYCHO_STCLINE_FOFN) ?
1 : 0)); + } + } + + spin_unlock(&stc_buf_lock); +} + +#define PSYCHO_IOMMU_TAG 0xa580UL +#define PSYCHO_IOMMU_DATA 0xa600UL + +static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm, + u64 *tag, u64 *data) +{ + int i; + + for (i = 0; i < 16; i++) { + unsigned long base = pbm->controller_regs; + unsigned long off = i * 8UL; + + tag[i] = upa_readq(base + PSYCHO_IOMMU_TAG+off); + data[i] = upa_readq(base + PSYCHO_IOMMU_DATA+off); + + /* Now clear out the entry. */ + upa_writeq(0, base + PSYCHO_IOMMU_TAG + off); + upa_writeq(0, base + PSYCHO_IOMMU_DATA + off); + } +} + +#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL) +#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL) +#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL) +#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL) +#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL) +#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffULL +#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL) +#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL) +#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffULL + +static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm, + u64 *tag, u64 *data) +{ + int i; + + for (i = 0; i < 16; i++) { + u64 tag_val, data_val; + const char *type_str; + tag_val = tag[i]; + if (!(tag_val & PSYCHO_IOMMU_TAG_ERR)) + continue; + + data_val = data[i]; + switch((tag_val & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) { + case 0: + type_str = "Protection Error"; + break; + case 1: + type_str = "Invalid Error"; + break; + case 2: + type_str = "TimeOut Error"; + break; + case 3: + default: + type_str = "ECC Error"; + break; + } + + printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) " + "str(%d) sz(%dK) vpg(%08llx)]\n", + pbm->name, i, type_str, + ((tag_val & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0), + ((tag_val & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0), + ((tag_val & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8), + (tag_val & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT); + printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) " + "ppg(%016llx)]\n", + pbm->name, i, + ((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0), + ((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0), + (data_val & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT); + } +} + +#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL +#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL + +void psycho_check_iommu_error(struct pci_pbm_info *pbm, + unsigned long afsr, + unsigned long afar, + enum psycho_error_type type) +{ + u64 control, iommu_tag[16], iommu_data[16]; + struct iommu *iommu = pbm->iommu; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + control = upa_readq(iommu->iommu_control); + if (control & PSYCHO_IOMMU_CTRL_XLTEERR) { + const char *type_str; + + control &= ~PSYCHO_IOMMU_CTRL_XLTEERR; + upa_writeq(control, iommu->iommu_control); + + switch ((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) { + case 0: + type_str = "Protection Error"; + break; + case 1: + type_str = "Invalid Error"; + break; + case 2: + type_str = "TimeOut Error"; + break; + case 3: + default: + type_str = "ECC Error"; + break; + } + printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", + pbm->name, type_str); + + /* It is very possible for another DVMA to occur while + * we do this probe, and corrupt the system further. + * But we are so screwed at this point that we are + * likely to crash hard anyways, so get as much + * diagnostic information to the console as we can. 
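	 * The two helpers called here snapshot all 16 IOMMU TLB
+	 * tag/data pairs through the diagnostic registers, clear the
+	 * hardware copies, and then decode any entry whose error bit
+	 * is set.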
+ */ + psycho_record_iommu_tags_and_data(pbm, iommu_tag, iommu_data); + psycho_dump_iommu_tags_and_data(pbm, iommu_tag, iommu_data); + } + psycho_check_stc_error(pbm); + spin_unlock_irqrestore(&iommu->lock, flags); +} + +#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL +#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL + +static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm) +{ + irqreturn_t ret = IRQ_NONE; + u64 csr, csr_error_bits; + u16 stat, *addr; + + csr = upa_readq(pbm->pci_csr); + csr_error_bits = csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR); + if (csr_error_bits) { + /* Clear the errors. */ + upa_writeq(csr, pbm->pci_csr); + + /* Log 'em. */ + if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR) + printk(KERN_ERR "%s: PCI streaming byte hole " + "error asserted.\n", pbm->name); + if (csr_error_bits & PSYCHO_PCICTRL_SERR) + printk(KERN_ERR "%s: PCI SERR signal asserted.\n", + pbm->name); + ret = IRQ_HANDLED; + } + addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, + 0, PCI_STATUS); + pci_config_read16(addr, &stat); + if (stat & (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR)) { + printk(KERN_ERR "%s: PCI bus error, PCI_STATUS[%04x]\n", + pbm->name, stat); + pci_config_write16(addr, 0xffff); + ret = IRQ_HANDLED; + } + return ret; +} + +#define PSYCHO_PCIAFSR_PMA 0x8000000000000000ULL +#define PSYCHO_PCIAFSR_PTA 0x4000000000000000ULL +#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000ULL +#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000ULL +#define PSYCHO_PCIAFSR_SMA 0x0800000000000000ULL +#define PSYCHO_PCIAFSR_STA 0x0400000000000000ULL +#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000ULL +#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000ULL +#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000ULL +#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000ULL +#define PSYCHO_PCIAFSR_BLK 0x0000000080000000ULL +#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000ULL +#define PSYCHO_PCIAFSR_MID 0x000000003e000000ULL +#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffULL + +irqreturn_t psycho_pcierr_intr(int irq, void *dev_id) +{ + struct pci_pbm_info *pbm = dev_id; + u64 afsr, afar, error_bits; + int reported; + + afsr = upa_readq(pbm->pci_afsr); + afar = upa_readq(pbm->pci_afar); + error_bits = afsr & + (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA | + PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR | + PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA | + PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR); + if (!error_bits) + return psycho_pcierr_intr_other(pbm); + upa_writeq(error_bits, pbm->pci_afsr); + printk(KERN_ERR "%s: PCI Error, primary error type[%s]\n", + pbm->name, + (((error_bits & PSYCHO_PCIAFSR_PMA) ? + "Master Abort" : + ((error_bits & PSYCHO_PCIAFSR_PTA) ? + "Target Abort" : + ((error_bits & PSYCHO_PCIAFSR_PRTRY) ? + "Excessive Retries" : + ((error_bits & PSYCHO_PCIAFSR_PPERR) ? + "Parity Error" : "???")))))); + printk(KERN_ERR "%s: bytemask[%04llx] UPA_MID[%02llx] was_block(%d)\n", + pbm->name, + (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL, + (afsr & PSYCHO_PCIAFSR_MID) >> 25UL, + (afsr & PSYCHO_PCIAFSR_BLK) ? 
1 : 0); + printk(KERN_ERR "%s: PCI AFAR [%016llx]\n", pbm->name, afar); + printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name); + reported = 0; + if (afsr & PSYCHO_PCIAFSR_SMA) { + reported++; + printk("(Master Abort)"); + } + if (afsr & PSYCHO_PCIAFSR_STA) { + reported++; + printk("(Target Abort)"); + } + if (afsr & PSYCHO_PCIAFSR_SRTRY) { + reported++; + printk("(Excessive Retries)"); + } + if (afsr & PSYCHO_PCIAFSR_SPERR) { + reported++; + printk("(Parity Error)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) { + psycho_check_iommu_error(pbm, afsr, afar, PCI_ERR); + pci_scan_for_target_abort(pbm, pbm->pci_bus); + } + if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA)) + pci_scan_for_master_abort(pbm, pbm->pci_bus); + + if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR)) + pci_scan_for_parity_error(pbm, pbm->pci_bus); + + return IRQ_HANDLED; +} + +static void psycho_iommu_flush(struct pci_pbm_info *pbm) +{ + int i; + + for (i = 0; i < 16; i++) { + unsigned long off = i * 8; + + upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_TAG + off); + upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_DATA + off); + } +} + +#define PSYCHO_IOMMU_CONTROL 0x0200UL +#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL +#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL +#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL +#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL +#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL +#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL +#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL +#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL +#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL +#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL +#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL +#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL +#define PSYCHO_IOMMU_FLUSH 0x0210UL +#define PSYCHO_IOMMU_TSBBASE 0x0208UL + +int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, + u32 dvma_offset, u32 dma_mask, + unsigned long write_complete_offset) +{ + struct iommu *iommu = pbm->iommu; + u64 control; + int err; + + iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL; + iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE; + iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH; + iommu->iommu_tags = pbm->controller_regs + PSYCHO_IOMMU_TAG; + iommu->write_complete_reg = (pbm->controller_regs + + write_complete_offset); + + iommu->iommu_ctxflush = 0; + + control = upa_readq(iommu->iommu_control); + control |= PSYCHO_IOMMU_CTRL_DENAB; + upa_writeq(control, iommu->iommu_control); + + psycho_iommu_flush(pbm); + + /* Leave diag mode enabled for full-flushing done in pci_iommu.c */ + err = iommu_table_init(iommu, tsbsize * 1024 * 8, + dvma_offset, dma_mask, pbm->numa_node); + if (err) + return err; + + upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); + + control = upa_readq(iommu->iommu_control); + control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); + control |= PSYCHO_IOMMU_CTRL_ENAB; + + switch (tsbsize) { + case 64: + control |= PSYCHO_IOMMU_TSBSZ_64K; + break; + case 128: + control |= PSYCHO_IOMMU_TSBSZ_128K; + break; + default: + return -EINVAL; + } + + upa_writeq(control, iommu->iommu_control); + + return 0; + +} + +void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op, + const char *chip_name, int chip_type) +{ + struct device_node *dp = op->dev.of_node; + + pbm->name 
= dp->full_name; + pbm->numa_node = NUMA_NO_NODE; + pbm->chip_type = chip_type; + pbm->chip_version = of_getintprop_default(dp, "version#", 0); + pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0); + pbm->op = op; + pbm->pci_ops = &sun4u_pci_ops; + pbm->config_space_reg_bits = 8; + pbm->index = pci_num_pbms++; + pci_get_pbm_props(pbm); + pci_determine_mem_io_space(pbm); + + printk(KERN_INFO "%s: %s PCI Bus Module ver[%x:%x]\n", + pbm->name, chip_name, + pbm->chip_version, pbm->chip_revision); +} diff --git a/arch/sparc/kernel/psycho_common.h b/arch/sparc/kernel/psycho_common.h new file mode 100644 index 000000000..6925231c5 --- /dev/null +++ b/arch/sparc/kernel/psycho_common.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PSYCHO_COMMON_H +#define _PSYCHO_COMMON_H + +/* U2P Programmer's Manual, page 13-55, configuration space + * address format: + * + * 32 24 23 16 15 11 10 8 7 2 1 0 + * --------------------------------------------------------- + * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 | + * --------------------------------------------------------- + */ +#define PSYCHO_CONFIG_BASE(PBM) \ + ((PBM)->config_space | (1UL << 24)) +#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \ + (((unsigned long)(BUS) << 16) | \ + ((unsigned long)(DEVFN) << 8) | \ + ((unsigned long)(REG))) + +static inline void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm, + unsigned char bus, + unsigned int devfn, + int where) +{ + return (void *) + (PSYCHO_CONFIG_BASE(pbm) | + PSYCHO_CONFIG_ENCODE(bus, devfn, where)); +} + +enum psycho_error_type { + UE_ERR, CE_ERR, PCI_ERR +}; + +void psycho_check_iommu_error(struct pci_pbm_info *pbm, + unsigned long afsr, + unsigned long afar, + enum psycho_error_type type); + +irqreturn_t psycho_pcierr_intr(int irq, void *dev_id); + +int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, + u32 dvma_offset, u32 dma_mask, + unsigned long write_complete_offset); + +void psycho_pbm_init_common(struct pci_pbm_info *pbm, + struct platform_device *op, + const char *chip_name, int chip_type); + +#endif /* _PSYCHO_COMMON_H */ diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c new file mode 100644 index 000000000..e7db48acb --- /dev/null +++ b/arch/sparc/kernel/ptrace_32.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ptrace.c: Sparc process tracing support. + * + * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net) + * + * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson, + * and David Mosberger. + * + * Added Linux support -miguel (weird, eh?, the original code was meant + * to emulate SunOS). + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "kernel.h" + +/* #define ALLOW_INIT_TRACING */ + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. 
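+ *
+ * On sparc32 there is no per-task hardware debug state to clear,
+ * which is why the function body below is empty.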
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* nothing to do */
+}
+
+enum sparc_regset {
+	REGSET_GENERAL,
+	REGSET_FP,
+};
+
+static int regwindow32_get(struct task_struct *target,
+			   const struct pt_regs *regs,
+			   u32 *uregs)
+{
+	unsigned long reg_window = regs->u_regs[UREG_I6];
+	int size = 16 * sizeof(u32);
+
+	if (target == current) {
+		if (copy_from_user(uregs, (void __user *)reg_window, size))
+			return -EFAULT;
+	} else {
+		if (access_process_vm(target, reg_window, uregs, size,
+				      FOLL_FORCE) != size)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int regwindow32_set(struct task_struct *target,
+			   const struct pt_regs *regs,
+			   u32 *uregs)
+{
+	unsigned long reg_window = regs->u_regs[UREG_I6];
+	int size = 16 * sizeof(u32);
+
+	if (target == current) {
+		if (copy_to_user((void __user *)reg_window, uregs, size))
+			return -EFAULT;
+	} else {
+		if (access_process_vm(target, reg_window, uregs, size,
+				      FOLL_FORCE | FOLL_WRITE) != size)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int genregs32_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 struct membuf to)
+{
+	const struct pt_regs *regs = target->thread.kregs;
+	u32 uregs[16];
+
+	if (target == current)
+		flush_user_windows();
+
+	membuf_write(&to, regs->u_regs, 16 * sizeof(u32));
+	if (!to.left)
+		return 0;
+	if (regwindow32_get(target, regs, uregs))
+		return -EFAULT;
+	membuf_write(&to, uregs, 16 * sizeof(u32));
+	membuf_store(&to, regs->psr);
+	membuf_store(&to, regs->pc);
+	membuf_store(&to, regs->npc);
+	membuf_store(&to, regs->y);
+	return membuf_zero(&to, 2 * sizeof(u32));
+}
+
+static int genregs32_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = target->thread.kregs;
+	u32 uregs[16];
+	u32 psr;
+	int ret;
+
+	if (target == current)
+		flush_user_windows();
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 regs->u_regs,
+				 0, 16 * sizeof(u32));
+	if (ret || !count)
+		return ret;
+
+	if (regwindow32_get(target, regs, uregs))
+		return -EFAULT;
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 uregs,
+				 16 * sizeof(u32), 32 * sizeof(u32));
+	if (ret)
+		return ret;
+	if (regwindow32_set(target, regs, uregs))
+		return -EFAULT;
+	if (!count)
+		return 0;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &psr,
+				 32 * sizeof(u32), 33 * sizeof(u32));
+	if (ret)
+		return ret;
+	regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+		    (psr & (PSR_ICC | PSR_SYSCALL));
+	if (!count)
+		return 0;
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &regs->pc,
+				 33 * sizeof(u32), 34 * sizeof(u32));
+	if (ret || !count)
+		return ret;
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &regs->npc,
+				 34 * sizeof(u32), 35 * sizeof(u32));
+	if (ret || !count)
+		return ret;
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &regs->y,
+				 35 * sizeof(u32), 36 * sizeof(u32));
+	if (ret || !count)
+		return ret;
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 36 * sizeof(u32), 38 * sizeof(u32));
+}
+
+static int fpregs32_get(struct task_struct *target,
+			const struct user_regset *regset,
+			struct membuf to)
+{
+#if 0
+	if (target == current)
+		save_and_clear_fpu();
+#endif
+
+	membuf_write(&to, target->thread.float_regs, 32 * sizeof(u32));
+	membuf_zero(&to, sizeof(u32));
+	membuf_write(&to, &target->thread.fsr, sizeof(u32));
+	membuf_store(&to, (u32)((1 << 8) | (8 << 16)));
+	return membuf_zero(&to, 64 * sizeof(u32));
+}
+
+static int
fpregs32_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long *fpregs = target->thread.float_regs; + int ret; + +#if 0 + if (target == current) + save_and_clear_fpu(); +#endif + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + fpregs, + 0, 32 * sizeof(u32)); + if (!ret) + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + 32 * sizeof(u32), + 33 * sizeof(u32)); + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fsr, + 33 * sizeof(u32), + 34 * sizeof(u32)); + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + 34 * sizeof(u32), -1); + return ret; +} + +static const struct user_regset sparc32_regsets[] = { + /* Format is: + * G0 --> G7 + * O0 --> O7 + * L0 --> L7 + * I0 --> I7 + * PSR, PC, nPC, Y, WIM, TBR + */ + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = 38, + .size = sizeof(u32), .align = sizeof(u32), + .regset_get = genregs32_get, .set = genregs32_set + }, + /* Format is: + * F0 --> F31 + * empty 32-bit word + * FSR (32--bit word) + * FPU QUEUE COUNT (8-bit char) + * FPU QUEUE ENTRYSIZE (8-bit char) + * FPU ENABLED (8-bit char) + * empty 8-bit char + * FPU QUEUE (64 32-bit ints) + */ + [REGSET_FP] = { + .core_note_type = NT_PRFPREG, + .n = 99, + .size = sizeof(u32), .align = sizeof(u32), + .regset_get = fpregs32_get, .set = fpregs32_set + }, +}; + +static int getregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + const struct pt_regs *regs = target->thread.kregs; + + if (target == current) + flush_user_windows(); + + membuf_store(&to, regs->psr); + membuf_store(&to, regs->pc); + membuf_store(&to, regs->npc); + membuf_store(&to, regs->y); + return membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u32)); +} + +static int setregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = target->thread.kregs; + u32 v[4]; + int ret; + + if (target == current) + flush_user_windows(); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + v, + 0, 4 * sizeof(u32)); + if (ret) + return ret; + regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | + (v[0] & (PSR_ICC | PSR_SYSCALL)); + regs->pc = v[1]; + regs->npc = v[2]; + regs->y = v[3]; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->u_regs + 1, + 4 * sizeof(u32) , 19 * sizeof(u32)); +} + +static int getfpregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ +#if 0 + if (target == current) + save_and_clear_fpu(); +#endif + membuf_write(&to, &target->thread.float_regs, 32 * sizeof(u32)); + membuf_write(&to, &target->thread.fsr, sizeof(u32)); + return membuf_zero(&to, 35 * sizeof(u32)); +} + +static int setfpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long *fpregs = target->thread.float_regs; + int ret; + +#if 0 + if (target == current) + save_and_clear_fpu(); +#endif + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + fpregs, + 0, 32 * sizeof(u32)); + if (ret) + return ret; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fsr, + 32 * sizeof(u32), + 33 * sizeof(u32)); +} + +static const struct user_regset ptrace32_regsets[] = { + [REGSET_GENERAL] = { + .n = 19, .size = 
sizeof(u32), + .regset_get = getregs_get, .set = setregs_set, + }, + [REGSET_FP] = { + .n = 68, .size = sizeof(u32), + .regset_get = getfpregs_get, .set = setfpregs_set, + }, +}; + +static const struct user_regset_view ptrace32_view = { + .regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets) +}; + +static const struct user_regset_view user_sparc32_view = { + .name = "sparc", .e_machine = EM_SPARC, + .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sparc32_view; +} + +struct fps { + unsigned long regs[32]; + unsigned long fsr; + unsigned long flags; + unsigned long extra; + unsigned long fpqd; + struct fq { + unsigned long *insnaddr; + unsigned long insn; + } fpq[16]; +}; + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4]; + void __user *addr2p; + struct pt_regs __user *pregs; + struct fps __user *fps; + int ret; + + addr2p = (void __user *) addr2; + pregs = (struct pt_regs __user *) addr; + fps = (struct fps __user *) addr; + + switch(request) { + case PTRACE_GETREGS: { + ret = copy_regset_to_user(child, &ptrace32_view, + REGSET_GENERAL, 0, + 19 * sizeof(u32), + pregs); + break; + } + + case PTRACE_SETREGS: { + ret = copy_regset_from_user(child, &ptrace32_view, + REGSET_GENERAL, 0, + 19 * sizeof(u32), + pregs); + break; + } + + case PTRACE_GETFPREGS: { + ret = copy_regset_to_user(child, &ptrace32_view, + REGSET_FP, 0, + 68 * sizeof(u32), + fps); + break; + } + + case PTRACE_SETFPREGS: { + ret = copy_regset_from_user(child, &ptrace32_view, + REGSET_FP, 0, + 33 * sizeof(u32), + fps); + break; + } + + case PTRACE_READTEXT: + case PTRACE_READDATA: + ret = ptrace_readdata(child, addr, addr2p, data); + + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + case PTRACE_WRITETEXT: + case PTRACE_WRITEDATA: + ret = ptrace_writedata(child, addr2p, addr, data); + + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + default: + if (request == PTRACE_SPARC_DETACH) + request = PTRACE_DETACH; + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p) +{ + int ret = 0; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + if (syscall_exit_p) + ptrace_report_syscall_exit(regs, 0); + else + ret = ptrace_report_syscall_entry(regs); + } + + return ret; +} diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c new file mode 100644 index 000000000..86a7eb5c2 --- /dev/null +++ b/arch/sparc/kernel/ptrace_64.c @@ -0,0 +1,1177 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* ptrace.c: Sparc process tracing support. + * + * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * + * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson, + * and David Mosberger. + * + * Added Linux support -miguel (weird, eh?, the original code was meant + * to emulate SunOS). 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define CREATE_TRACE_POINTS
+#include
+
+#include "entry.h"
+
+/* #define ALLOW_INIT_TRACING */
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) \
+	{.name = n, .offset = (PT_V9_##r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME("g0", G0),
+	REG_OFFSET_NAME("g1", G1),
+	REG_OFFSET_NAME("g2", G2),
+	REG_OFFSET_NAME("g3", G3),
+	REG_OFFSET_NAME("g4", G4),
+	REG_OFFSET_NAME("g5", G5),
+	REG_OFFSET_NAME("g6", G6),
+	REG_OFFSET_NAME("g7", G7),
+
+	REG_OFFSET_NAME("i0", I0),
+	REG_OFFSET_NAME("i1", I1),
+	REG_OFFSET_NAME("i2", I2),
+	REG_OFFSET_NAME("i3", I3),
+	REG_OFFSET_NAME("i4", I4),
+	REG_OFFSET_NAME("i5", I5),
+	REG_OFFSET_NAME("i6", I6),
+	REG_OFFSET_NAME("i7", I7),
+
+	REG_OFFSET_NAME("tstate", TSTATE),
+	REG_OFFSET_NAME("pc", TPC),
+	REG_OFFSET_NAME("npc", TNPC),
+	REG_OFFSET_NAME("y", Y),
+	REG_OFFSET_NAME("lr", I7),
+
+	REG_OFFSET_END,
+};
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* nothing to do */
+}
+
+/* To get the necessary page struct, access_process_vm() first calls
+ * get_user_pages().  This has done a flush_dcache_page() on the
+ * accessed page.  Then our caller (copy_{to,from}_user_page()) did
+ * a memcpy to read/write the data from that page.
+ *
+ * Now, the only thing we have to do is:
+ * 1) flush the D-cache if it's possible that an illegal alias
+ *    has been created
+ * 2) flush the I-cache if this is pre-cheetah and we did a write
+ */
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	BUG_ON(len > PAGE_SIZE);
+
+	if (tlb_type == hypervisor)
+		return;
+
+	preempt_disable();
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	/* If bit 13 of the kernel address we used to access the
+	 * user page is the same as the virtual address that page
+	 * is mapped to in the user's address space, we can skip the
+	 * D-cache flush.
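+	 * (Bit 13 is the cache-color bit here: on processors where
+	 * DCACHE_ALIASING_POSSIBLE is set, the virtually-indexed
+	 * D-cache spans two 8K pages, so two mappings can only alias
+	 * if they differ in that bit.)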
+	 */
+	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
+		unsigned long start = __pa(kaddr);
+		unsigned long end = start + len;
+		unsigned long dcache_line_size;
+
+		dcache_line_size = local_cpu_data().dcache_line_size;
+
+		if (tlb_type == spitfire) {
+			for (; start < end; start += dcache_line_size)
+				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
+		} else {
+			start &= ~(dcache_line_size - 1);
+			for (; start < end; start += dcache_line_size)
+				__asm__ __volatile__(
+					"stxa %%g0, [%0] %1\n\t"
+					"membar #Sync"
+					: /* no outputs */
+					: "r" (start),
+					  "i" (ASI_DCACHE_INVALIDATE));
+		}
+	}
+#endif
+	if (write && tlb_type == spitfire) {
+		unsigned long start = (unsigned long) kaddr;
+		unsigned long end = start + len;
+		unsigned long icache_line_size;
+
+		icache_line_size = local_cpu_data().icache_line_size;
+
+		for (; start < end; start += icache_line_size)
+			flushi(start);
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(flush_ptrace_access);
+
+static int get_from_target(struct task_struct *target, unsigned long uaddr,
+			   void *kbuf, int len)
+{
+	if (target == current) {
+		if (copy_from_user(kbuf, (void __user *) uaddr, len))
+			return -EFAULT;
+	} else {
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+					     FOLL_FORCE);
+		if (len2 != len)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int set_to_target(struct task_struct *target, unsigned long uaddr,
+			 void *kbuf, int len)
+{
+	if (target == current) {
+		if (copy_to_user((void __user *) uaddr, kbuf, len))
+			return -EFAULT;
+	} else {
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+					     FOLL_FORCE | FOLL_WRITE);
+		if (len2 != len)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int regwindow64_get(struct task_struct *target,
+			   const struct pt_regs *regs,
+			   struct reg_window *wbuf)
+{
+	unsigned long rw_addr = regs->u_regs[UREG_I6];
+
+	if (!test_thread_64bit_stack(rw_addr)) {
+		struct reg_window32 win32;
+		int i;
+
+		if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
+			return -EFAULT;
+		for (i = 0; i < 8; i++)
+			wbuf->locals[i] = win32.locals[i];
+		for (i = 0; i < 8; i++)
+			wbuf->ins[i] = win32.ins[i];
+	} else {
+		rw_addr += STACK_BIAS;
+		if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int regwindow64_set(struct task_struct *target,
+			   const struct pt_regs *regs,
+			   struct reg_window *wbuf)
+{
+	unsigned long rw_addr = regs->u_regs[UREG_I6];
+
+	if (!test_thread_64bit_stack(rw_addr)) {
+		struct reg_window32 win32;
+		int i;
+
+		for (i = 0; i < 8; i++)
+			win32.locals[i] = wbuf->locals[i];
+		for (i = 0; i < 8; i++)
+			win32.ins[i] = wbuf->ins[i];
+
+		if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
+			return -EFAULT;
+	} else {
+		rw_addr += STACK_BIAS;
+		if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+enum sparc_regset {
+	REGSET_GENERAL,
+	REGSET_FP,
+};
+
+static int genregs64_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 struct membuf to)
+{
+	const struct pt_regs *regs = task_pt_regs(target);
+	struct reg_window window;
+
+	if (target == current)
+		flushw_user();
+
+	membuf_write(&to, regs->u_regs, 16 * sizeof(u64));
+	if (!to.left)
+		return 0;
+	if (regwindow64_get(target, regs, &window))
+		return -EFAULT;
+	membuf_write(&to, &window, 16 * sizeof(u64));
+	/* TSTATE, TPC, TNPC */
+	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
+	return membuf_store(&to, (u64)regs->y);
+}
+
+static int genregs64_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos,
unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret;
+
+	if (target == current)
+		flushw_user();
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 regs->u_regs,
+				 0, 16 * sizeof(u64));
+	if (!ret && count && pos < (32 * sizeof(u64))) {
+		struct reg_window window;
+
+		if (regwindow64_get(target, regs, &window))
+			return -EFAULT;
+
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &window,
+					 16 * sizeof(u64),
+					 32 * sizeof(u64));
+
+		if (!ret &&
+		    regwindow64_set(target, regs, &window))
+			return -EFAULT;
+	}
+
+	if (!ret && count > 0) {
+		unsigned long tstate;
+
+		/* TSTATE */
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &tstate,
+					 32 * sizeof(u64),
+					 33 * sizeof(u64));
+		if (!ret) {
+			/* Only the condition codes and the "in syscall"
+			 * state can be modified in the %tstate register.
+			 */
+			tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+			regs->tstate |= tstate;
+		}
+	}
+
+	if (!ret) {
+		/* TPC, TNPC */
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &regs->tpc,
+					 33 * sizeof(u64),
+					 35 * sizeof(u64));
+	}
+
+	if (!ret) {
+		unsigned long y = regs->y;
+
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &y,
+					 35 * sizeof(u64),
+					 36 * sizeof(u64));
+		if (!ret)
+			regs->y = y;
+	}
+
+	if (!ret)
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						36 * sizeof(u64), -1);
+
+	return ret;
+}
+
+static int fpregs64_get(struct task_struct *target,
+			const struct user_regset *regset,
+			struct membuf to)
+{
+	struct thread_info *t = task_thread_info(target);
+	unsigned long fprs;
+
+	if (target == current)
+		save_and_clear_fpu();
+
+	fprs = t->fpsaved[0];
+
+	if (fprs & FPRS_DL)
+		membuf_write(&to, t->fpregs, 16 * sizeof(u64));
+	else
+		membuf_zero(&to, 16 * sizeof(u64));
+
+	if (fprs & FPRS_DU)
+		membuf_write(&to, t->fpregs + 16, 16 * sizeof(u64));
+	else
+		membuf_zero(&to, 16 * sizeof(u64));
+	if (fprs & FPRS_FEF) {
+		membuf_store(&to, t->xfsr[0]);
+		membuf_store(&to, t->gsr[0]);
+	} else {
+		membuf_zero(&to, 2 * sizeof(u64));
+	}
+	return membuf_store(&to, fprs);
+}
+
+static int fpregs64_set(struct task_struct *target,
+			const struct user_regset *regset,
+			unsigned int pos, unsigned int count,
+			const void *kbuf, const void __user *ubuf)
+{
+	unsigned long *fpregs = task_thread_info(target)->fpregs;
+	unsigned long fprs;
+	int ret;
+
+	if (target == current)
+		save_and_clear_fpu();
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 fpregs,
+				 0, 32 * sizeof(u64));
+	if (!ret)
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 task_thread_info(target)->xfsr,
+					 32 * sizeof(u64),
+					 33 * sizeof(u64));
+	if (!ret)
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 task_thread_info(target)->gsr,
+					 33 * sizeof(u64),
+					 34 * sizeof(u64));
+
+	fprs = task_thread_info(target)->fpsaved[0];
+	if (!ret && count > 0) {
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fprs,
+					 34 * sizeof(u64),
+					 35 * sizeof(u64));
+	}
+
+	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+	task_thread_info(target)->fpsaved[0] = fprs;
+
+	if (!ret)
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						35 * sizeof(u64), -1);
+	return ret;
+}
+
+static const struct user_regset sparc64_regsets[] = {
+	/* Format is:
+	 *	G0 --> G7
+	 *	O0 --> O7
+	 *	L0 --> L7
+	 *	I0 --> I7
+	 *	TSTATE, TPC, TNPC, Y
+	 */
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = 36,
+		.size = sizeof(u64), .align = sizeof(u64),
.regset_get = genregs64_get, .set = genregs64_set
+	},
+	/* Format is:
+	 *	F0 --> F63
+	 *	FSR
+	 *	GSR
+	 *	FPRS
+	 */
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG,
+		.n = 35,
+		.size = sizeof(u64), .align = sizeof(u64),
+		.regset_get = fpregs64_get, .set = fpregs64_set
+	},
+};
+
+static int getregs64_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 struct membuf to)
+{
+	const struct pt_regs *regs = task_pt_regs(target);
+
+	if (target == current)
+		flushw_user();
+
+	membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u64));
+	membuf_store(&to, (u64)0);
+	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
+	return membuf_store(&to, (u64)regs->y);
+}
+
+static int setregs64_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	unsigned long y = regs->y;
+	unsigned long tstate;
+	int ret;
+
+	if (target == current)
+		flushw_user();
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 regs->u_regs + 1,
+				 0 * sizeof(u64),
+				 15 * sizeof(u64));
+	if (ret)
+		return ret;
+	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					15 * sizeof(u64), 16 * sizeof(u64));
+	if (ret)
+		return ret;
+	/* TSTATE */
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &tstate,
+				 16 * sizeof(u64),
+				 17 * sizeof(u64));
+	if (ret)
+		return ret;
+	/* Only the condition codes and the "in syscall"
+	 * state can be modified in the %tstate register.
+	 */
+	tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+	regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+	regs->tstate |= tstate;
+
+	/* TPC, TNPC */
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &regs->tpc,
+				 17 * sizeof(u64),
+				 19 * sizeof(u64));
+	if (ret)
+		return ret;
+	/* Y */
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &y,
+				 19 * sizeof(u64),
+				 20 * sizeof(u64));
+	if (!ret)
+		regs->y = y;
+	return ret;
+}
+
+static const struct user_regset ptrace64_regsets[] = {
+	/* Format is:
+	 *	G1 --> G7
+	 *	O0 --> O7
+	 *	0
+	 *	TSTATE, TPC, TNPC, Y
+	 */
+	[REGSET_GENERAL] = {
+		.n = 20, .size = sizeof(u64),
+		.regset_get = getregs64_get, .set = setregs64_set,
+	},
+};
+
+static const struct user_regset_view ptrace64_view = {
+	.regsets = ptrace64_regsets, .n = ARRAY_SIZE(ptrace64_regsets)
+};
+
+static const struct user_regset_view user_sparc64_view = {
+	.name = "sparc64", .e_machine = EM_SPARCV9,
+	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
+};
+
+#ifdef CONFIG_COMPAT
+static int genregs32_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 struct membuf to)
+{
+	const struct pt_regs *regs = task_pt_regs(target);
+	u32 uregs[16];
+	int i;
+
+	if (target == current)
+		flushw_user();
+
+	for (i = 0; i < 16; i++)
+		membuf_store(&to, (u32)regs->u_regs[i]);
+	if (!to.left)
+		return 0;
+	if (get_from_target(target, regs->u_regs[UREG_I6],
+			    uregs, sizeof(uregs)))
+		return -EFAULT;
+	membuf_write(&to, uregs, 16 * sizeof(u32));
+	membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
+	membuf_store(&to, (u32)(regs->tpc));
+	membuf_store(&to, (u32)(regs->tnpc));
+	membuf_store(&to, (u32)(regs->y));
+	return membuf_zero(&to, 2 * sizeof(u32));
+}
+
+static int genregs32_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	compat_ulong_t __user *reg_window;
+	const compat_ulong_t *k =
kbuf;
+	const compat_ulong_t __user *u = ubuf;
+	compat_ulong_t reg;
+
+	if (target == current)
+		flushw_user();
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf) {
+		for (; count > 0 && pos < 16; count--)
+			regs->u_regs[pos++] = *k++;
+
+		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
+		if (target == current) {
+			for (; count > 0 && pos < 32; count--) {
+				if (put_user(*k++, &reg_window[pos++]))
+					return -EFAULT;
+			}
+		} else {
+			for (; count > 0 && pos < 32; count--) {
+				if (access_process_vm(target,
+						      (unsigned long)
+						      &reg_window[pos],
+						      (void *) k,
+						      sizeof(*k),
+						      FOLL_FORCE | FOLL_WRITE)
+				    != sizeof(*k))
+					return -EFAULT;
+				k++;
+				pos++;
+			}
+		}
+	} else {
+		for (; count > 0 && pos < 16; count--) {
+			if (get_user(reg, u++))
+				return -EFAULT;
+			regs->u_regs[pos++] = reg;
+		}
+
+		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
+		if (target == current) {
+			for (; count > 0 && pos < 32; count--) {
+				if (get_user(reg, u++) ||
+				    put_user(reg, &reg_window[pos++]))
+					return -EFAULT;
+			}
+		} else {
+			for (; count > 0 && pos < 32; count--) {
+				if (get_user(reg, u++))
+					return -EFAULT;
+				if (access_process_vm(target,
+						      (unsigned long)
+						      &reg_window[pos],
+						      &reg, sizeof(reg),
+						      FOLL_FORCE | FOLL_WRITE)
+				    != sizeof(reg))
+					return -EFAULT;
+				pos++;
+				u++;
+			}
+		}
+	}
+	while (count > 0) {
+		unsigned long tstate;
+
+		if (kbuf)
+			reg = *k++;
+		else if (get_user(reg, u++))
+			return -EFAULT;
+
+		switch (pos) {
+		case 32: /* PSR */
+			tstate = regs->tstate;
+			tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+			tstate |= psr_to_tstate_icc(reg);
+			if (reg & PSR_SYSCALL)
+				tstate |= TSTATE_SYSCALL;
+			regs->tstate = tstate;
+			break;
+		case 33: /* PC */
+			regs->tpc = reg;
+			break;
+		case 34: /* NPC */
+			regs->tnpc = reg;
+			break;
+		case 35: /* Y */
+			regs->y = reg;
+			break;
+		case 36: /* WIM */
+		case 37: /* TBR */
+			break;
+		default:
+			goto finish;
+		}
+
+		pos++;
+		count--;
+	}
+finish:
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 38 * sizeof(reg), -1);
+}
+
+static int fpregs32_get(struct task_struct *target,
+			const struct user_regset *regset,
+			struct membuf to)
+{
+	struct thread_info *t = task_thread_info(target);
+	bool enabled;
+
+	if (target == current)
+		save_and_clear_fpu();
+
+	enabled = t->fpsaved[0] & FPRS_FEF;
+
+	membuf_write(&to, t->fpregs, 32 * sizeof(u32));
+	membuf_zero(&to, sizeof(u32));
+	if (enabled)
+		membuf_store(&to, (u32)t->xfsr[0]);
+	else
+		membuf_zero(&to, sizeof(u32));
+	membuf_store(&to, (u32)((enabled << 8) | (8 << 16)));
+	return membuf_zero(&to, 64 * sizeof(u32));
+}
+
+static int fpregs32_set(struct task_struct *target,
+			const struct user_regset *regset,
+			unsigned int pos, unsigned int count,
+			const void *kbuf, const void __user *ubuf)
+{
+	unsigned long *fpregs = task_thread_info(target)->fpregs;
+	unsigned long fprs;
+	int ret;
+
+	if (target == current)
+		save_and_clear_fpu();
+
+	fprs = task_thread_info(target)->fpsaved[0];
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 fpregs,
+				 0, 32 * sizeof(u32));
+	if (!ret)
+		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					  32 * sizeof(u32),
+					  33 * sizeof(u32));
+	if (!ret && count > 0) {
+		compat_ulong_t fsr;
+		unsigned long val;
+
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fsr,
+					 33 * sizeof(u32),
+					 34 * sizeof(u32));
+		if (!ret) {
+			val = task_thread_info(target)->xfsr[0];
+			val &= 0xffffffff00000000UL;
+			val |= fsr;
+			task_thread_info(target)->xfsr[0] =
val; + } + } + + fprs |= (FPRS_FEF | FPRS_DL); + task_thread_info(target)->fpsaved[0] = fprs; + + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + 34 * sizeof(u32), -1); + return ret; +} + +static const struct user_regset sparc32_regsets[] = { + /* Format is: + * G0 --> G7 + * O0 --> O7 + * L0 --> L7 + * I0 --> I7 + * PSR, PC, nPC, Y, WIM, TBR + */ + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = 38, + .size = sizeof(u32), .align = sizeof(u32), + .regset_get = genregs32_get, .set = genregs32_set + }, + /* Format is: + * F0 --> F31 + * empty 32-bit word + * FSR (32--bit word) + * FPU QUEUE COUNT (8-bit char) + * FPU QUEUE ENTRYSIZE (8-bit char) + * FPU ENABLED (8-bit char) + * empty 8-bit char + * FPU QUEUE (64 32-bit ints) + */ + [REGSET_FP] = { + .core_note_type = NT_PRFPREG, + .n = 99, + .size = sizeof(u32), .align = sizeof(u32), + .regset_get = fpregs32_get, .set = fpregs32_set + }, +}; + +static int getregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + const struct pt_regs *regs = task_pt_regs(target); + int i; + + if (target == current) + flushw_user(); + + membuf_store(&to, (u32)tstate_to_psr(regs->tstate)); + membuf_store(&to, (u32)(regs->tpc)); + membuf_store(&to, (u32)(regs->tnpc)); + membuf_store(&to, (u32)(regs->y)); + for (i = 1; i < 16; i++) + membuf_store(&to, (u32)regs->u_regs[i]); + return to.left; +} + +static int setregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + unsigned long tstate; + u32 uregs[19]; + int i, ret; + + if (target == current) + flushw_user(); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + uregs, + 0, 19 * sizeof(u32)); + if (ret) + return ret; + + tstate = regs->tstate; + tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL); + tstate |= psr_to_tstate_icc(uregs[0]); + if (uregs[0] & PSR_SYSCALL) + tstate |= TSTATE_SYSCALL; + regs->tstate = tstate; + regs->tpc = uregs[1]; + regs->tnpc = uregs[2]; + regs->y = uregs[3]; + + for (i = 1; i < 15; i++) + regs->u_regs[i] = uregs[3 + i]; + return 0; +} + +static int getfpregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct thread_info *t = task_thread_info(target); + + if (target == current) + save_and_clear_fpu(); + + membuf_write(&to, t->fpregs, 32 * sizeof(u32)); + if (t->fpsaved[0] & FPRS_FEF) + membuf_store(&to, (u32)t->xfsr[0]); + else + membuf_zero(&to, sizeof(u32)); + return membuf_zero(&to, 35 * sizeof(u32)); +} + +static int setfpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long *fpregs = task_thread_info(target)->fpregs; + unsigned long fprs; + int ret; + + if (target == current) + save_and_clear_fpu(); + + fprs = task_thread_info(target)->fpsaved[0]; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + fpregs, + 0, 32 * sizeof(u32)); + if (!ret) { + compat_ulong_t fsr; + unsigned long val; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fsr, + 32 * sizeof(u32), + 33 * sizeof(u32)); + if (!ret) { + val = task_thread_info(target)->xfsr[0]; + val &= 0xffffffff00000000UL; + val |= fsr; + task_thread_info(target)->xfsr[0] = val; + } + } + + fprs |= (FPRS_FEF | FPRS_DL); + task_thread_info(target)->fpsaved[0] = fprs; + return ret; +} + +static const struct 
user_regset ptrace32_regsets[] = { + [REGSET_GENERAL] = { + .n = 19, .size = sizeof(u32), + .regset_get = getregs_get, .set = setregs_set, + }, + [REGSET_FP] = { + .n = 68, .size = sizeof(u32), + .regset_get = getfpregs_get, .set = setfpregs_set, + }, +}; + +static const struct user_regset_view ptrace32_view = { + .regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets) +}; + +static const struct user_regset_view user_sparc32_view = { + .name = "sparc", .e_machine = EM_SPARC, + .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) +}; +#endif /* CONFIG_COMPAT */ + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_32BIT)) + return &user_sparc32_view; +#endif + return &user_sparc64_view; +} + +#ifdef CONFIG_COMPAT +struct compat_fps { + unsigned int regs[32]; + unsigned int fsr; + unsigned int flags; + unsigned int extra; + unsigned int fpqd; + struct compat_fq { + unsigned int insnaddr; + unsigned int insn; + } fpq[16]; +}; + +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4]; + struct pt_regs32 __user *pregs; + struct compat_fps __user *fps; + unsigned long addr2 = caddr2; + unsigned long addr = caddr; + unsigned long data = cdata; + int ret; + + pregs = (struct pt_regs32 __user *) addr; + fps = (struct compat_fps __user *) addr; + + switch (request) { + case PTRACE_PEEKUSR: + ret = (addr != 0) ? -EIO : 0; + break; + + case PTRACE_GETREGS: + ret = copy_regset_to_user(child, &ptrace32_view, + REGSET_GENERAL, 0, + 19 * sizeof(u32), + pregs); + break; + + case PTRACE_SETREGS: + ret = copy_regset_from_user(child, &ptrace32_view, + REGSET_GENERAL, 0, + 19 * sizeof(u32), + pregs); + break; + + case PTRACE_GETFPREGS: + ret = copy_regset_to_user(child, &ptrace32_view, + REGSET_FP, 0, + 68 * sizeof(u32), + fps); + break; + + case PTRACE_SETFPREGS: + ret = copy_regset_from_user(child, &ptrace32_view, + REGSET_FP, 0, + 33 * sizeof(u32), + fps); + break; + + case PTRACE_READTEXT: + case PTRACE_READDATA: + ret = ptrace_readdata(child, addr, + (char __user *)addr2, data); + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + case PTRACE_WRITETEXT: + case PTRACE_WRITEDATA: + ret = ptrace_writedata(child, (char __user *) addr2, + addr, data); + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + default: + if (request == PTRACE_SPARC_DETACH) + request = PTRACE_DETACH; + ret = compat_ptrace_request(child, request, addr, data); + break; + } + + return ret; +} +#endif /* CONFIG_COMPAT */ + +struct fps { + unsigned int regs[64]; + unsigned long fsr; +}; + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + const struct user_regset_view *view = task_user_regset_view(current); + unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4]; + struct pt_regs __user *pregs; + struct fps __user *fps; + void __user *addr2p; + int ret; + + pregs = (struct pt_regs __user *) addr; + fps = (struct fps __user *) addr; + addr2p = (void __user *) addr2; + + switch (request) { + case PTRACE_PEEKUSR: + ret = (addr != 0) ? 
-EIO : 0; + break; + + case PTRACE_GETREGS64: + ret = copy_regset_to_user(child, &ptrace64_view, + REGSET_GENERAL, 0, + 19 * sizeof(u64), + pregs); + break; + + case PTRACE_SETREGS64: + ret = copy_regset_from_user(child, &ptrace64_view, + REGSET_GENERAL, 0, + 19 * sizeof(u64), + pregs); + break; + + case PTRACE_GETFPREGS64: + ret = copy_regset_to_user(child, view, REGSET_FP, + 0 * sizeof(u64), + 33 * sizeof(u64), + fps); + break; + + case PTRACE_SETFPREGS64: + ret = copy_regset_from_user(child, view, REGSET_FP, + 0 * sizeof(u64), + 33 * sizeof(u64), + fps); + break; + + case PTRACE_READTEXT: + case PTRACE_READDATA: + ret = ptrace_readdata(child, addr, addr2p, data); + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + case PTRACE_WRITETEXT: + case PTRACE_WRITEDATA: + ret = ptrace_writedata(child, addr2p, addr, data); + if (ret == data) + ret = 0; + else if (ret >= 0) + ret = -EIO; + break; + + default: + if (request == PTRACE_SPARC_DETACH) + request = PTRACE_DETACH; + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int syscall_trace_enter(struct pt_regs *regs) +{ + int ret = 0; + + /* do the secure computing check first */ + secure_computing_strict(regs->u_regs[UREG_G1]); + + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = ptrace_report_syscall_entry(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->u_regs[UREG_G1]); + + audit_syscall_entry(regs->u_regs[UREG_G1], regs->u_regs[UREG_I0], + regs->u_regs[UREG_I1], regs->u_regs[UREG_I2], + regs->u_regs[UREG_I3]); + + return ret; +} + +asmlinkage void syscall_trace_leave(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->u_regs[UREG_I0]); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_NOHZ)) + user_enter(); +} + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static inline int regs_within_kernel_stack(struct pt_regs *regs, + unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS; + return ((addr & ~(THREAD_SIZE - 1)) == + (ksp & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
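+ *
+ * A hedged illustration, not part of this file: a kprobe pre-handler
+ * could combine this helper with regs_query_register_offset() above.
+ * The probed register name ("i0") and the printout are assumptions
+ * for the example, and it would need linux/kprobes.h:
+ *
+ *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+ *	{
+ *		int off = regs_query_register_offset("i0");
+ *		unsigned long top = regs_get_kernel_stack_nth(regs, 0);
+ *
+ *		if (off >= 0)
+ *			pr_info("i0 at pt_regs+%d, stack[0]=%lx\n", off, top);
+ *		return 0;
+ *	}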
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS; + unsigned long *addr = (unsigned long *)ksp; + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c new file mode 100644 index 000000000..69c1b6c04 --- /dev/null +++ b/arch/sparc/kernel/reboot.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* reboot.c: reboot/shutdown/halt/poweroff handling + * + * Copyright (C) 2008 David S. Miller + */ +#include +#include +#include +#include +#include + +#include +#include +#include + +/* sysctl - toggle power-off restriction for serial console + * systems in machine_power_off() + */ +int scons_pwroff = 1; + +/* This isn't actually used; it exists merely to satisfy the + * reference in kernel/sys.c + */ +void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); + +void machine_power_off(void) +{ + if (!of_node_is_type(of_console_device, "serial") || scons_pwroff) + prom_halt_power_off(); + + prom_halt(); +} + +void machine_halt(void) +{ + prom_halt(); + panic("Halt failed!"); +} + +void machine_restart(char *cmd) +{ + char *p; + + p = strchr(reboot_command, '\n'); + if (p) + *p = 0; + if (cmd) + prom_reboot(cmd); + if (*reboot_command) + prom_reboot(reboot_command); + prom_reboot(""); + panic("Reboot failed!"); +} + diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S new file mode 100644 index 000000000..8931fe266 --- /dev/null +++ b/arch/sparc/kernel/rtrap_32.S @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rtrap.S: Return from Sparc trap low-level code. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define t_psr l0 +#define t_pc l1 +#define t_npc l2 +#define t_wim l3 +#define twin_tmp1 l4 +#define glob_tmp g4 +#define curptr g6 + + /* 7 WINDOW SPARC PATCH INSTRUCTIONS */ + .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3 + .globl rtrap_7win_patch4, rtrap_7win_patch5 +rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp +rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp +rtrap_7win_patch3: srl %g1, 7, %g2 +rtrap_7win_patch4: srl %g2, 6, %g2 +rtrap_7win_patch5: and %g1, 0x7f, %g1 + /* END OF PATCH INSTRUCTIONS */ + + /* We need to check for a few things which are: + * 1) The need to call schedule() because this + * process's quantum is up. + * 2) Pending signals for this process; if any + * exist we need to call do_signal() to do + * the needful. + * + * Else we just check if the rett would land us + * in an invalid window; if so we need to grab + * it off the user/kernel stack first. + */ + + .globl ret_trap_entry, rtrap_patch1, rtrap_patch2 + .globl rtrap_patch3, rtrap_patch4, rtrap_patch5 + .globl ret_trap_lockless_ipi +ret_trap_entry: +ret_trap_lockless_ipi: + andcc %t_psr, PSR_PS, %g0 + sethi %hi(PSR_SYSCALL), %g1 + be 1f + andn %t_psr, %g1, %t_psr + + wr %t_psr, 0x0, %psr + b ret_trap_kernel + nop + +1: + ld [%curptr + TI_FLAGS], %g2 + andcc %g2, (_TIF_NEED_RESCHED), %g0 + be signal_p + nop + + call schedule + nop + + ld [%curptr + TI_FLAGS], %g2 +signal_p: + andcc %g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0 + bz,a ret_trap_continue + ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr + + mov %g2, %o2 + mov %l6, %o1 + call do_notify_resume + add %sp, STACKFRAME_SZ, %o0 ! 
pt_regs ptr + + b signal_p + ld [%curptr + TI_FLAGS], %g2 + +ret_trap_continue: + sethi %hi(PSR_SYSCALL), %g1 + andn %t_psr, %g1, %t_psr + wr %t_psr, 0x0, %psr + WRITE_PAUSE + + ld [%curptr + TI_W_SAVED], %twin_tmp1 + orcc %g0, %twin_tmp1, %g0 + be ret_trap_nobufwins + nop + + wr %t_psr, PSR_ET, %psr + WRITE_PAUSE + + mov 1, %o1 + call try_to_clear_window_buffer + add %sp, STACKFRAME_SZ, %o0 + + b signal_p + ld [%curptr + TI_FLAGS], %g2 + +ret_trap_nobufwins: + /* Load up the user's out registers so we can pull + * a window from the stack, if necessary. + */ + LOAD_PT_INS(sp) + + /* If there are already live user windows in the + * set we can return from trap safely. + */ + ld [%curptr + TI_UWINMASK], %twin_tmp1 + orcc %g0, %twin_tmp1, %g0 + bne ret_trap_userwins_ok + nop + + /* Calculate new %wim, we have to pull a register + * window from the users stack. + */ +ret_trap_pull_one_window: + rd %wim, %t_wim + sll %t_wim, 0x1, %twin_tmp1 +rtrap_patch1: srl %t_wim, 0x7, %glob_tmp + or %glob_tmp, %twin_tmp1, %glob_tmp +rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp + + wr %glob_tmp, 0x0, %wim + + /* Here comes the architecture specific + * branch to the user stack checking routine + * for return from traps. + */ + b srmmu_rett_stackchk + andcc %fp, 0x7, %g0 + +ret_trap_userwins_ok: + LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc) + or %t_pc, %t_npc, %g2 + andcc %g2, 0x3, %g0 + sethi %hi(PSR_SYSCALL), %g2 + be 1f + andn %t_psr, %g2, %t_psr + + b ret_trap_unaligned_pc + add %sp, STACKFRAME_SZ, %o0 + +1: + LOAD_PT_YREG(sp, g1) + LOAD_PT_GLOBALS(sp) + + wr %t_psr, 0x0, %psr + WRITE_PAUSE + + jmp %t_pc + rett %t_npc + +ret_trap_unaligned_pc: + ld [%sp + STACKFRAME_SZ + PT_PC], %o1 + ld [%sp + STACKFRAME_SZ + PT_NPC], %o2 + ld [%sp + STACKFRAME_SZ + PT_PSR], %o3 + + wr %t_wim, 0x0, %wim ! or else... + + wr %t_psr, PSR_ET, %psr + WRITE_PAUSE + + call do_memaccess_unaligned + nop + + b signal_p + ld [%curptr + TI_FLAGS], %g2 + +ret_trap_kernel: + /* Will the rett land us in the invalid window? */ + mov 2, %g1 + sll %g1, %t_psr, %g1 +rtrap_patch3: srl %g1, 8, %g2 + or %g1, %g2, %g1 + rd %wim, %g2 + andcc %g2, %g1, %g0 + be 1f ! Nope, just return from the trap + sll %g2, 0x1, %g1 + + /* We have to grab a window before returning. */ +rtrap_patch4: srl %g2, 7, %g2 + or %g1, %g2, %g1 +rtrap_patch5: and %g1, 0xff, %g1 + + wr %g1, 0x0, %wim + + /* Grrr, make sure we load from the right %sp... */ + LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1) + + restore %g0, %g0, %g0 + LOAD_WINDOW(sp) + b 2f + save %g0, %g0, %g0 + + /* Reload the entire frame in case this is from a + * kernel system call or whatever... 
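+ *
+ * (Hedged aside: the %wim arithmetic in ret_trap_pull_one_window and
+ * in the kern_rtt path above is a plain rotate of the invalid-window
+ * mask.  In C, for the 8-window case produced by rtrap_patch1/2:
+ *
+ *	static unsigned int wim_rotate_left(unsigned int wim)
+ *	{
+ *		return ((wim << 1) | (wim >> 7)) & 0xff;	// 8 windows
+ *	}
+ *
+ * The rtrap_7win_patch* instructions retarget the same computation
+ * for 7 register windows, i.e. (wim << 1 | wim >> 6) & 0x7f.)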
+ */ +1: + LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1) +2: + sethi %hi(PSR_SYSCALL), %twin_tmp1 + andn %t_psr, %twin_tmp1, %t_psr + wr %t_psr, 0x0, %psr + WRITE_PAUSE + + jmp %t_pc + rett %t_npc + +ret_trap_user_stack_is_bolixed: + wr %t_wim, 0x0, %wim + + wr %t_psr, PSR_ET, %psr + WRITE_PAUSE + + call window_ret_fault + add %sp, STACKFRAME_SZ, %o0 + + b signal_p + ld [%curptr + TI_FLAGS], %g2 + + .globl srmmu_rett_stackchk +srmmu_rett_stackchk: + bne ret_trap_user_stack_is_bolixed + sethi %hi(PAGE_OFFSET), %g1 + cmp %g1, %fp + bleu ret_trap_user_stack_is_bolixed + mov AC_M_SFSR, %g1 +LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0) +SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0) + +LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1) +SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1) + or %g1, 0x2, %g1 +LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS) + + restore %g0, %g0, %g0 + + LOAD_WINDOW(sp) + + save %g0, %g0, %g0 + + andn %g1, 0x2, %g1 +LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS) + + mov AC_M_SFAR, %g2 +LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2) +SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2) + + mov AC_M_SFSR, %g1 +LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1) +SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1) + andcc %g1, 0x2, %g0 + be ret_trap_userwins_ok + nop + + b,a ret_trap_user_stack_is_bolixed diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S new file mode 100644 index 000000000..eef102765 --- /dev/null +++ b/arch/sparc/kernel/rtrap_64.S @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rtrap.S: Preparing for return from trap on Sparc V9. + * + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + */ + + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CONTEXT_TRACKING_USER +# define SCHEDULE_USER schedule_user +#else +# define SCHEDULE_USER schedule +#endif + + .text + .align 32 +__handle_preemption: + call SCHEDULE_USER +661: wrpr %g0, RTRAP_PSTATE, %pstate + /* If userspace is using ADI, it could potentially pass + * a pointer with version tag embedded in it. To maintain + * the ADI security, we must re-enable PSTATE.mcde before + * we continue execution in the kernel for another thread. + */ + .section .sun_m7_1insn_patch, "ax" + .word 661b + wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate + .previous + ba,pt %xcc, __handle_preemption_continue + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + +__handle_user_windows: + add %sp, PTREGS_OFF, %o0 + call fault_in_user_windows +661: wrpr %g0, RTRAP_PSTATE, %pstate + /* If userspace is using ADI, it could potentially pass + * a pointer with version tag embedded in it. To maintain + * the ADI security, we must re-enable PSTATE.mcde before + * we continue execution in the kernel for another thread. + */ + .section .sun_m7_1insn_patch, "ax" + .word 661b + wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate + .previous + ba,pt %xcc, __handle_preemption_continue + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + +__handle_userfpu: + rd %fprs, %l5 + andcc %l5, FPRS_FEF, %g0 + sethi %hi(TSTATE_PEF), %o0 + be,a,pn %icc, __handle_userfpu_continue + andn %l1, %o0, %l1 + ba,a,pt %xcc, __handle_userfpu_continue + +__handle_signal: + mov %l5, %o1 + add %sp, PTREGS_OFF, %o0 + mov %l0, %o2 + call do_notify_resume +661: wrpr %g0, RTRAP_PSTATE, %pstate + /* If userspace is using ADI, it could potentially pass + * a pointer with version tag embedded in it. 
To maintain + * the ADI security, we must re-enable PSTATE.mcde before + * we continue execution in the kernel for another thread. + */ + .section .sun_m7_1insn_patch, "ax" + .word 661b + wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate + .previous + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + + /* Signal delivery can modify pt_regs tstate, so we must + * reload it. + */ + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 + sethi %hi(0xf << 20), %l4 + and %l1, %l4, %l4 + andn %l1, %l4, %l1 + ba,pt %xcc, __handle_preemption_continue + srl %l4, 20, %l4 + + /* When returning from a NMI (%pil==15) interrupt we want to + * avoid running softirqs, doing IRQ tracing, preempting, etc. + */ + .globl rtrap_nmi +rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 + sethi %hi(0xf << 20), %l4 + and %l1, %l4, %l4 + andn %l1, %l4, %l1 + srl %l4, 20, %l4 + ba,pt %xcc, rtrap_no_irq_enable + nop + /* Do not actually set the %pil here. We will do that + * below after we clear PSTATE_IE in the %pstate register. + * If we re-enable interrupts here, we can recurse down + * the hardirq stack potentially endlessly, causing a + * stack overflow. + */ + + .align 64 + .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall +rtrap_irq: +rtrap: + /* mm/ultra.S:xcall_report_regs KNOWS about this load. */ + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 +rtrap_xcall: + sethi %hi(0xf << 20), %l4 + and %l1, %l4, %l4 + andn %l1, %l4, %l1 + srl %l4, 20, %l4 +#ifdef CONFIG_TRACE_IRQFLAGS + brnz,pn %l4, rtrap_no_irq_enable + nop + call trace_hardirqs_on + nop + /* Do not actually set the %pil here. We will do that + * below after we clear PSTATE_IE in the %pstate register. + * If we re-enable interrupts here, we can recurse down + * the hardirq stack potentially endlessly, causing a + * stack overflow. + * + * It is tempting to put this test and trace_hardirqs_on + * call at the 'rt_continue' label, but that will not work + * as that path hits unconditionally and we do not want to + * execute this in NMI return paths, for example. + */ +#endif +rtrap_no_irq_enable: + andcc %l1, TSTATE_PRIV, %l3 + bne,pn %icc, to_kernel + nop + + /* We must hold IRQs off and atomically test schedule+signal + * state, then hold them off all the way back to userspace. + * If we are returning to kernel, none of this matters. Note + * that we are disabling interrupts via PSTATE_IE, not using + * %pil. + * + * If we do not do this, there is a window where we would do + * the tests, later the signal/resched event arrives but we do + * not process it since we are still in kernel mode. It would + * take until the next local IRQ before the signal/resched + * event would be handled. + * + * This also means that if we have to deal with user + * windows, we have to redo all of these sched+signal checks + * with IRQs disabled. 
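+ *
+ * Roughly, in C terms (a hedged model of the loop formed by the
+ * labels below; the two helper names are invented for illustration):
+ *
+ *	local_irq_disable();
+ *	while (current_thread_info()->flags & _TIF_USER_WORK_MASK) {
+ *		handle_one_event();	// may re-enable IRQs internally
+ *		local_irq_disable();	// then re-test with IRQs off
+ *	}
+ *	restore_registers_and_rett();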
+ */ +to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + wrpr 0, %pil +__handle_preemption_continue: + ldx [%g6 + TI_FLAGS], %l0 + sethi %hi(_TIF_USER_WORK_MASK), %o0 + or %o0, %lo(_TIF_USER_WORK_MASK), %o0 + andcc %l0, %o0, %g0 + sethi %hi(TSTATE_PEF), %o0 + be,pt %xcc, user_nowork + andcc %l1, %o0, %g0 + andcc %l0, _TIF_NEED_RESCHED, %g0 + bne,pn %xcc, __handle_preemption + andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0 + bne,pn %xcc, __handle_signal + ldub [%g6 + TI_WSAVED], %o2 + brnz,pn %o2, __handle_user_windows + nop + sethi %hi(TSTATE_PEF), %o0 + andcc %l1, %o0, %g0 + + /* This fpdepth clear is necessary for non-syscall rtraps only */ +user_nowork: + bne,pn %xcc, __handle_userfpu + stb %g0, [%g6 + TI_FPDEPTH] +__handle_userfpu_continue: + +rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2 + + ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 + ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 + ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 + brz,pt %l3, 1f + mov %g6, %l2 + + /* Must do this before thread reg is clobbered below. */ + LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2) +1: + ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 + ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 + + /* Normal globals are restored, go to trap globals. */ +661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate + nop + .section .sun4v_2insn_patch, "ax" + .word 661b + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + SET_GL(1) + .previous + + mov %l2, %g6 + + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6 + ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7 + ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2 + + ld [%sp + PTREGS_OFF + PT_V9_Y], %o3 + wr %o3, %g0, %y + wrpr %l4, 0x0, %pil + wrpr %g0, 0x1, %tl + andn %l1, TSTATE_SYSCALL, %l1 + wrpr %l1, %g0, %tstate + wrpr %l2, %g0, %tpc + wrpr %o2, %g0, %tnpc + + brnz,pn %l3, kern_rtt + mov PRIMARY_CONTEXT, %l7 + +661: ldxa [%l7 + %l7] ASI_DMMU, %l0 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%l7 + %l7] ASI_MMU, %l0 + .previous + + sethi %hi(sparc64_kern_pri_nuc_bits), %l1 + ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 + or %l0, %l1, %l0 + +661: stxa %l0, [%l7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %l0, [%l7] ASI_MMU + .previous + + sethi %hi(KERNBASE), %l7 + flush %l7 + rdpr %wstate, %l1 + rdpr %otherwin, %l2 + srl %l1, 3, %l1 + +661: wrpr %l2, %g0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x89880000 ! 
normalw + .previous + + wrpr %l1, %g0, %wstate + brnz,pt %l2, user_rtt_restore +661: wrpr %g0, %g0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + + ldx [%g6 + TI_FLAGS], %g3 + wr %g0, ASI_AIUP, %asi + rdpr %cwp, %g1 + andcc %g3, _TIF_32BIT, %g0 + sub %g1, 1, %g1 + bne,pt %xcc, user_rtt_fill_32bit + wrpr %g1, %cwp + ba,a,pt %xcc, user_rtt_fill_64bit + nop + +user_rtt_fill_fixup_dax: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 1, %g3 + +user_rtt_fill_fixup_mna: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 2, %g3 + +user_rtt_fill_fixup: + ba,pt %xcc, user_rtt_fill_fixup_common + clr %g3 + +user_rtt_pre_restore: + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + +user_rtt_restore: + restore + rdpr %canrestore, %g1 + wrpr %g1, 0x0, %cleanwin + retry + nop + +kern_rtt: rdpr %canrestore, %g1 + brz,pn %g1, kern_rtt_fill + nop +kern_rtt_restore: + stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC] + restore + retry + +to_kernel: +#ifdef CONFIG_PREEMPTION + ldsw [%g6 + TI_PRE_COUNT], %l5 + brnz %l5, kern_fpucheck + ldx [%g6 + TI_FLAGS], %l5 + andcc %l5, _TIF_NEED_RESCHED, %g0 + be,pt %xcc, kern_fpucheck + nop + cmp %l4, 0 + bne,pn %xcc, kern_fpucheck + nop + call preempt_schedule_irq + nop + ba,pt %xcc, rtrap +#endif +kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 + brz,pt %l5, rt_continue + srl %l5, 1, %o0 + add %g6, TI_FPSAVED, %l6 + ldub [%l6 + %o0], %l2 + sub %l5, 2, %l5 + + add %g6, TI_GSR, %o1 + andcc %l2, (FPRS_FEF|FPRS_DU), %g0 + be,pt %icc, 2f + and %l2, FPRS_DL, %l6 + andcc %l2, FPRS_FEF, %g0 + be,pn %icc, 5f + sll %o0, 3, %o5 + rd %fprs, %g1 + + wr %g1, FPRS_FEF, %fprs + ldx [%o1 + %o5], %g1 + add %g6, TI_XFSR, %o1 + sll %o0, 8, %o2 + add %g6, TI_FPREGS, %o3 + brz,pn %l6, 1f + add %g6, TI_FPREGS+0x40, %o4 + + membar #Sync + ldda [%o3 + %o2] ASI_BLK_P, %f0 + ldda [%o4 + %o2] ASI_BLK_P, %f16 + membar #Sync +1: andcc %l2, FPRS_DU, %g0 + be,pn %icc, 1f + wr %g1, 0, %gsr + add %o2, 0x80, %o2 + membar #Sync + ldda [%o3 + %o2] ASI_BLK_P, %f32 + ldda [%o4 + %o2] ASI_BLK_P, %f48 +1: membar #Sync + ldx [%o1 + %o5], %fsr +2: stb %l5, [%g6 + TI_FPDEPTH] + ba,pt %xcc, rt_continue + nop +5: wr %g0, FPRS_FEF, %fprs + sll %o0, 8, %o2 + + add %g6, TI_FPREGS+0x80, %o3 + add %g6, TI_FPREGS+0xc0, %o4 + membar #Sync + ldda [%o3 + %o2] ASI_BLK_P, %f32 + ldda [%o4 + %o2] ASI_BLK_P, %f48 + membar #Sync + wr %g0, FPRS_DU, %fprs + ba,pt %xcc, rt_continue + stb %l5, [%g6 + TI_FPDEPTH] diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c new file mode 100644 index 000000000..32141e100 --- /dev/null +++ b/arch/sparc/kernel/sbus.c @@ -0,0 +1,678 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sbus.c: UltraSparc SBUS controller support. + * + * Copyright (C) 1999 David S. 
Miller (davem@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iommu_common.h" + +#define MAP_BASE ((u32)0xc0000000) + +/* Offsets from iommu_regs */ +#define SYSIO_IOMMUREG_BASE 0x2400UL +#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */ +#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */ +#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */ +#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */ +#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */ +#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */ +#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */ +#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */ + +#define IOMMU_DRAM_VALID (1UL << 30UL) + +/* Offsets from strbuf_regs */ +#define SYSIO_STRBUFREG_BASE 0x2800UL +#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */ +#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */ +#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */ +#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */ +#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */ +#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */ +#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */ + +#define STRBUF_TAG_VALID 0x02UL + +/* Enable 64-bit DVMA mode for the given device. */ +void sbus_set_sbus64(struct device *dev, int bursts) +{ + struct iommu *iommu = dev->archdata.iommu; + struct platform_device *op = to_platform_device(dev); + const struct linux_prom_registers *regs; + unsigned long cfg_reg; + int slot; + u64 val; + + regs = of_get_property(op->dev.of_node, "reg", NULL); + if (!regs) { + printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n", + op->dev.of_node); + return; + } + slot = regs->which_io; + + cfg_reg = iommu->write_complete_reg; + switch (slot) { + case 0: + cfg_reg += 0x20UL; + break; + case 1: + cfg_reg += 0x28UL; + break; + case 2: + cfg_reg += 0x30UL; + break; + case 3: + cfg_reg += 0x38UL; + break; + case 13: + cfg_reg += 0x40UL; + break; + case 14: + cfg_reg += 0x48UL; + break; + case 15: + cfg_reg += 0x50UL; + break; + + default: + return; + } + + val = upa_readq(cfg_reg); + if (val & (1UL << 14UL)) { + /* Extended transfer mode already enabled. */ + return; + } + + val |= (1UL << 14UL); + + if (bursts & DMA_BURST8) + val |= (1UL << 1UL); + if (bursts & DMA_BURST16) + val |= (1UL << 2UL); + if (bursts & DMA_BURST32) + val |= (1UL << 3UL); + if (bursts & DMA_BURST64) + val |= (1UL << 4UL); + upa_writeq(val, cfg_reg); +} +EXPORT_SYMBOL(sbus_set_sbus64); + +/* INO number to IMAP register offset for SYSIO external IRQ's. + * This should conform to both Sunfire/Wildfire server and Fusion + * desktop designs. 
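+ *
+ * For external (slot) interrupts the INO itself encodes the slot and
+ * SBUS level; a hedged helper equivalent to the decoding done in
+ * sbus_build_irq() below (the helper name is illustrative):
+ *
+ *	static void sysio_ino_decode(unsigned int ino, int *slot, int *level)
+ *	{
+ *		*slot  = (ino & 0x18) >> 3;	// INOs 0x00-0x1f: slots 0-3
+ *		*level = ino & 0x07;		// level within the slot
+ *	}
+ *
+ * INOs at 0x20 and above index the onboard-device entries directly.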
+ */ +#define SYSIO_IMAP_SLOT0 0x2c00UL +#define SYSIO_IMAP_SLOT1 0x2c08UL +#define SYSIO_IMAP_SLOT2 0x2c10UL +#define SYSIO_IMAP_SLOT3 0x2c18UL +#define SYSIO_IMAP_SCSI 0x3000UL +#define SYSIO_IMAP_ETH 0x3008UL +#define SYSIO_IMAP_BPP 0x3010UL +#define SYSIO_IMAP_AUDIO 0x3018UL +#define SYSIO_IMAP_PFAIL 0x3020UL +#define SYSIO_IMAP_KMS 0x3028UL +#define SYSIO_IMAP_FLPY 0x3030UL +#define SYSIO_IMAP_SHW 0x3038UL +#define SYSIO_IMAP_KBD 0x3040UL +#define SYSIO_IMAP_MS 0x3048UL +#define SYSIO_IMAP_SER 0x3050UL +#define SYSIO_IMAP_TIM0 0x3060UL +#define SYSIO_IMAP_TIM1 0x3068UL +#define SYSIO_IMAP_UE 0x3070UL +#define SYSIO_IMAP_CE 0x3078UL +#define SYSIO_IMAP_SBERR 0x3080UL +#define SYSIO_IMAP_PMGMT 0x3088UL +#define SYSIO_IMAP_GFX 0x3090UL +#define SYSIO_IMAP_EUPA 0x3098UL + +#define bogon ((unsigned long) -1) +static unsigned long sysio_irq_offsets[] = { + /* SBUS Slot 0 --> 3, level 1 --> 7 */ + SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, + SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, + SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, + SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, + SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, + SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, + SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, + SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, + + /* Onboard devices (not relevant/used on SunFire). */ + SYSIO_IMAP_SCSI, + SYSIO_IMAP_ETH, + SYSIO_IMAP_BPP, + bogon, + SYSIO_IMAP_AUDIO, + SYSIO_IMAP_PFAIL, + bogon, + bogon, + SYSIO_IMAP_KMS, + SYSIO_IMAP_FLPY, + SYSIO_IMAP_SHW, + SYSIO_IMAP_KBD, + SYSIO_IMAP_MS, + SYSIO_IMAP_SER, + bogon, + bogon, + SYSIO_IMAP_TIM0, + SYSIO_IMAP_TIM1, + bogon, + bogon, + SYSIO_IMAP_UE, + SYSIO_IMAP_CE, + SYSIO_IMAP_SBERR, + SYSIO_IMAP_PMGMT, +}; + +#undef bogon + +#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets) + +/* Convert Interrupt Mapping register pointer to associated + * Interrupt Clear register pointer, SYSIO specific version. + */ +#define SYSIO_ICLR_UNUSED0 0x3400UL +#define SYSIO_ICLR_SLOT0 0x3408UL +#define SYSIO_ICLR_SLOT1 0x3448UL +#define SYSIO_ICLR_SLOT2 0x3488UL +#define SYSIO_ICLR_SLOT3 0x34c8UL +static unsigned long sysio_imap_to_iclr(unsigned long imap) +{ + unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0; + return imap + diff; +} + +static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino) +{ + struct iommu *iommu = op->dev.archdata.iommu; + unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; + unsigned long imap, iclr; + int sbus_level = 0; + + imap = sysio_irq_offsets[ino]; + if (imap == ((unsigned long)-1)) { + prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n", + ino); + prom_halt(); + } + imap += reg_base; + + /* SYSIO inconsistency. For external SLOTS, we have to select + * the right ICLR register based upon the lower SBUS irq level + * bits. 
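+ *
+ * Each slot has a bank of eight 8-byte ICLR registers, indexed by
+ * (level - 1); a hedged restatement of the arithmetic below, with
+ * SLOTn standing in for the slot chosen by the switch:
+ *
+ *	iclr = reg_base + SYSIO_ICLR_SLOTn + (sbus_level - 1) * 8;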
+ */ + if (ino >= 0x20) { + iclr = sysio_imap_to_iclr(imap); + } else { + int sbus_slot = (ino & 0x18)>>3; + + sbus_level = ino & 0x7; + + switch(sbus_slot) { + case 0: + iclr = reg_base + SYSIO_ICLR_SLOT0; + break; + case 1: + iclr = reg_base + SYSIO_ICLR_SLOT1; + break; + case 2: + iclr = reg_base + SYSIO_ICLR_SLOT2; + break; + default: + case 3: + iclr = reg_base + SYSIO_ICLR_SLOT3; + break; + } + + iclr += ((unsigned long)sbus_level - 1UL) * 8UL; + } + return build_irq(sbus_level, iclr, imap); +} + +/* Error interrupt handling. */ +#define SYSIO_UE_AFSR 0x0030UL +#define SYSIO_UE_AFAR 0x0038UL +#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ +#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ +#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ +#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ +#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ +#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ +#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ +#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */ +#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ +#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ +#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ +static irqreturn_t sysio_ue_handler(int irq, void *dev_id) +{ + struct platform_device *op = dev_id; + struct iommu *iommu = op->dev.archdata.iommu; + unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; + unsigned long afsr_reg, afar_reg; + unsigned long afsr, afar, error_bits; + int reported, portid; + + afsr_reg = reg_base + SYSIO_UE_AFSR; + afar_reg = reg_base + SYSIO_UE_AFAR; + + /* Latch error status. */ + afsr = upa_readq(afsr_reg); + afar = upa_readq(afar_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR | + SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR); + upa_writeq(error_bits, afsr_reg); + + portid = of_getintprop_default(op->dev.of_node, "portid", -1); + + /* Log the error. */ + printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n", + portid, + (((error_bits & SYSIO_UEAFSR_PPIO) ? + "PIO" : + ((error_bits & SYSIO_UEAFSR_PDRD) ? + "DVMA Read" : + ((error_bits & SYSIO_UEAFSR_PDWR) ? 
+ "DVMA Write" : "???"))))); + printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n", + portid, + (afsr & SYSIO_UEAFSR_DOFF) >> 45UL, + (afsr & SYSIO_UEAFSR_SIZE) >> 42UL, + (afsr & SYSIO_UEAFSR_MID) >> 37UL); + printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); + printk("SYSIO[%x]: Secondary UE errors [", portid); + reported = 0; + if (afsr & SYSIO_UEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & SYSIO_UEAFSR_SDRD) { + reported++; + printk("(DVMA Read)"); + } + if (afsr & SYSIO_UEAFSR_SDWR) { + reported++; + printk("(DVMA Write)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + return IRQ_HANDLED; +} + +#define SYSIO_CE_AFSR 0x0040UL +#define SYSIO_CE_AFAR 0x0048UL +#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ +#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ +#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ +#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */ +#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ +#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ +#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */ +#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */ +#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */ +#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ +#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ +#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ +static irqreturn_t sysio_ce_handler(int irq, void *dev_id) +{ + struct platform_device *op = dev_id; + struct iommu *iommu = op->dev.archdata.iommu; + unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; + unsigned long afsr_reg, afar_reg; + unsigned long afsr, afar, error_bits; + int reported, portid; + + afsr_reg = reg_base + SYSIO_CE_AFSR; + afar_reg = reg_base + SYSIO_CE_AFAR; + + /* Latch error status. */ + afsr = upa_readq(afsr_reg); + afar = upa_readq(afar_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR | + SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR); + upa_writeq(error_bits, afsr_reg); + + portid = of_getintprop_default(op->dev.of_node, "portid", -1); + + printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n", + portid, + (((error_bits & SYSIO_CEAFSR_PPIO) ? + "PIO" : + ((error_bits & SYSIO_CEAFSR_PDRD) ? + "DVMA Read" : + ((error_bits & SYSIO_CEAFSR_PDWR) ? + "DVMA Write" : "???"))))); + + /* XXX Use syndrome and afar to print out module string just like + * XXX UDB CE trap handler does... 
-DaveM + */ + printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n", + portid, + (afsr & SYSIO_CEAFSR_DOFF) >> 45UL, + (afsr & SYSIO_CEAFSR_ESYND) >> 48UL, + (afsr & SYSIO_CEAFSR_SIZE) >> 42UL, + (afsr & SYSIO_CEAFSR_MID) >> 37UL); + printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); + + printk("SYSIO[%x]: Secondary CE errors [", portid); + reported = 0; + if (afsr & SYSIO_CEAFSR_SPIO) { + reported++; + printk("(PIO)"); + } + if (afsr & SYSIO_CEAFSR_SDRD) { + reported++; + printk("(DVMA Read)"); + } + if (afsr & SYSIO_CEAFSR_SDWR) { + reported++; + printk("(DVMA Write)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + return IRQ_HANDLED; +} + +#define SYSIO_SBUS_AFSR 0x2010UL +#define SYSIO_SBUS_AFAR 0x2018UL +#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */ +#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */ +#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */ +#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */ +#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */ +#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */ +#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */ +#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */ +#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */ +#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */ +#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */ +#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */ +static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) +{ + struct platform_device *op = dev_id; + struct iommu *iommu = op->dev.archdata.iommu; + unsigned long afsr_reg, afar_reg, reg_base; + unsigned long afsr, afar, error_bits; + int reported, portid; + + reg_base = iommu->write_complete_reg - 0x2000UL; + afsr_reg = reg_base + SYSIO_SBUS_AFSR; + afar_reg = reg_base + SYSIO_SBUS_AFAR; + + afsr = upa_readq(afsr_reg); + afar = upa_readq(afar_reg); + + /* Clear primary/secondary error status bits. */ + error_bits = afsr & + (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR | + SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR); + upa_writeq(error_bits, afsr_reg); + + portid = of_getintprop_default(op->dev.of_node, "portid", -1); + + /* Log the error. */ + printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n", + portid, + (((error_bits & SYSIO_SBAFSR_PLE) ? + "Late PIO Error" : + ((error_bits & SYSIO_SBAFSR_PTO) ? + "Time Out" : + ((error_bits & SYSIO_SBAFSR_PBERR) ? + "Error Ack" : "???")))), + (afsr & SYSIO_SBAFSR_RD) ? 
1 : 0); + printk("SYSIO[%x]: size[%lx] MID[%lx]\n", + portid, + (afsr & SYSIO_SBAFSR_SIZE) >> 42UL, + (afsr & SYSIO_SBAFSR_MID) >> 37UL); + printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); + printk("SYSIO[%x]: Secondary SBUS errors [", portid); + reported = 0; + if (afsr & SYSIO_SBAFSR_SLE) { + reported++; + printk("(Late PIO Error)"); + } + if (afsr & SYSIO_SBAFSR_STO) { + reported++; + printk("(Time Out)"); + } + if (afsr & SYSIO_SBAFSR_SBERR) { + reported++; + printk("(Error Ack)"); + } + if (!reported) + printk("(none)"); + printk("]\n"); + + /* XXX check iommu/strbuf for further error status XXX */ + + return IRQ_HANDLED; +} + +#define ECC_CONTROL 0x0020UL +#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */ +#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */ +#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */ + +#define SYSIO_UE_INO 0x34 +#define SYSIO_CE_INO 0x35 +#define SYSIO_SBUSERR_INO 0x36 + +static void __init sysio_register_error_handlers(struct platform_device *op) +{ + struct iommu *iommu = op->dev.archdata.iommu; + unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; + unsigned int irq; + u64 control; + int portid; + + portid = of_getintprop_default(op->dev.of_node, "portid", -1); + + irq = sbus_build_irq(op, SYSIO_UE_INO); + if (request_irq(irq, sysio_ue_handler, 0, + "SYSIO_UE", op) < 0) { + prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n", + portid); + prom_halt(); + } + + irq = sbus_build_irq(op, SYSIO_CE_INO); + if (request_irq(irq, sysio_ce_handler, 0, + "SYSIO_CE", op) < 0) { + prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n", + portid); + prom_halt(); + } + + irq = sbus_build_irq(op, SYSIO_SBUSERR_INO); + if (request_irq(irq, sysio_sbus_error_handler, 0, + "SYSIO_SBERR", op) < 0) { + prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n", + portid); + prom_halt(); + } + + /* Now turn the error interrupts on and also enable ECC checking. */ + upa_writeq((SYSIO_ECNTRL_ECCEN | + SYSIO_ECNTRL_UEEN | + SYSIO_ECNTRL_CEEN), + reg_base + ECC_CONTROL); + + control = upa_readq(iommu->write_complete_reg); + control |= 0x100UL; /* SBUS Error Interrupt Enable */ + upa_writeq(control, iommu->write_complete_reg); +} + +/* Boot time initialization. 
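+ *
+ * A hedged note on the IOMMU control word programmed below: the
+ * "TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1" setting packs as
+ *
+ *	u64 ctrl = (7ULL << 16) |	// TSB_SIZE
+ *		   (0ULL << 2)  |	// TBW_SIZE
+ *		   (1ULL << 1)  |	// MMU_DE
+ *		   (1ULL << 0);		// MMU_EN
+ *
+ * which is exactly the value written to iommu->iommu_control.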
*/ +static void __init sbus_iommu_init(struct platform_device *op) +{ + const struct linux_prom64_registers *pr; + struct device_node *dp = op->dev.of_node; + struct iommu *iommu; + struct strbuf *strbuf; + unsigned long regs, reg_base; + int i, portid; + u64 control; + + pr = of_get_property(dp, "reg", NULL); + if (!pr) { + prom_printf("sbus_iommu_init: Cannot map SYSIO " + "control registers.\n"); + prom_halt(); + } + regs = pr->phys_addr; + + iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); + strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); + if (!iommu || !strbuf) + goto fatal_memory_error; + + op->dev.archdata.iommu = iommu; + op->dev.archdata.stc = strbuf; + op->dev.archdata.numa_node = NUMA_NO_NODE; + + reg_base = regs + SYSIO_IOMMUREG_BASE; + iommu->iommu_control = reg_base + IOMMU_CONTROL; + iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE; + iommu->iommu_flush = reg_base + IOMMU_FLUSH; + iommu->iommu_tags = iommu->iommu_control + + (IOMMU_TAGDIAG - IOMMU_CONTROL); + + reg_base = regs + SYSIO_STRBUFREG_BASE; + strbuf->strbuf_control = reg_base + STRBUF_CONTROL; + strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH; + strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC; + + strbuf->strbuf_enabled = 1; + + strbuf->strbuf_flushflag = (volatile unsigned long *) + ((((unsigned long)&strbuf->__flushflag_buf[0]) + + 63UL) + & ~63UL); + strbuf->strbuf_flushflag_pa = (unsigned long) + __pa(strbuf->strbuf_flushflag); + + /* The SYSIO SBUS control register is used for dummy reads + * in order to ensure write completion. + */ + iommu->write_complete_reg = regs + 0x2000UL; + + portid = of_getintprop_default(op->dev.of_node, "portid", -1); + printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n", + portid, regs); + + /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ + if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1)) + goto fatal_memory_error; + + control = upa_readq(iommu->iommu_control); + control = ((7UL << 16UL) | + (0UL << 2UL) | + (1UL << 1UL) | + (1UL << 0UL)); + upa_writeq(control, iommu->iommu_control); + + /* Clean out any cruft in the IOMMU using + * diagnostic accesses. + */ + for (i = 0; i < 16; i++) { + unsigned long dram, tag; + + dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL); + tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); + + dram += (unsigned long)i * 8UL; + tag += (unsigned long)i * 8UL; + upa_writeq(0, dram); + upa_writeq(0, tag); + } + upa_readq(iommu->write_complete_reg); + + /* Give the TSB to SYSIO. */ + upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); + + /* Setup streaming buffer, DE=1 SB_EN=1 */ + control = (1UL << 1UL) | (1UL << 0UL); + upa_writeq(control, strbuf->strbuf_control); + + /* Clear out the tags using diagnostics. */ + for (i = 0; i < 16; i++) { + unsigned long ptag, ltag; + + ptag = strbuf->strbuf_control + + (STRBUF_PTAGDIAG - STRBUF_CONTROL); + ltag = strbuf->strbuf_control + + (STRBUF_LTAGDIAG - STRBUF_CONTROL); + ptag += (unsigned long)i * 8UL; + ltag += (unsigned long)i * 8UL; + + upa_writeq(0UL, ptag); + upa_writeq(0UL, ltag); + } + + /* Enable DVMA arbitration for all devices/slots. */ + control = upa_readq(iommu->write_complete_reg); + control |= 0x3fUL; + upa_writeq(control, iommu->write_complete_reg); + + /* Now some Xfire specific grot... 
*/ + if (this_is_starfire) + starfire_hookup(portid); + + sysio_register_error_handlers(op); + return; + +fatal_memory_error: + kfree(iommu); + kfree(strbuf); + prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); +} + +static int __init sbus_init(void) +{ + struct device_node *dp; + + for_each_node_by_name(dp, "sbus") { + struct platform_device *op = of_find_device_by_node(dp); + + sbus_iommu_init(op); + of_propagate_archdata(op); + } + + return 0; +} + +subsys_initcall(sbus_init); diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c new file mode 100644 index 000000000..c9d1ba4f3 --- /dev/null +++ b/arch/sparc/kernel/setup_32.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sparc/kernel/setup.c + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2000 Anton Blanchard (anton@samba.org) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" + +struct screen_info screen_info = { + 0, 0, /* orig-x, orig-y */ + 0, /* unused */ + 0, /* orig-video-page */ + 0, /* orig-video-mode */ + 128, /* orig-video-cols */ + 0,0,0, /* ega_ax, ega_bx, ega_cx */ + 54, /* orig-video-lines */ + 0, /* orig-video-isVGA */ + 16 /* orig-video-points */ +}; + +/* Typing sync at the prom prompt calls the function pointed to by + * romvec->pv_synchook which I set to the following function. + * This should sync all filesystems and return, for now it just + * prints out pretty messages and returns. + */ + +/* Pretty sick eh? */ +static void prom_sync_me(void) +{ + unsigned long prom_tbr, flags; + + /* XXX Badly broken. FIX! - Anton */ + local_irq_save(flags); + __asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr)); + __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t" + "nop\n\t" + "nop\n\t" + "nop\n\t" : : "r" (&trapbase)); + + prom_printf("PROM SYNC COMMAND...\n"); + show_free_areas(0, NULL); + if (!is_idle_task(current)) { + local_irq_enable(); + ksys_sync(); + local_irq_disable(); + } + prom_printf("Returning to prom\n"); + + __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t" + "nop\n\t" + "nop\n\t" + "nop\n\t" : : "r" (prom_tbr)); + local_irq_restore(flags); +} + +static unsigned int boot_flags __initdata = 0; +#define BOOTME_DEBUG 0x1 + +/* Exported for mm/init.c:paging_init. */ +unsigned long cmdline_memory_size __initdata = 0; + +/* which CPU booted us (0xff = not set) */ +unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */ + +static void +prom_console_write(struct console *con, const char *s, unsigned int n) +{ + prom_write(s, n); +} + +static struct console prom_early_console = { + .name = "earlyprom", + .write = prom_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, +}; + +/* + * Process kernel command line switches that are specific to the + * SPARC or that require special low-level processing. 
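+ *
+ * A hedged, standalone model of the "mem=" suffix handling done in
+ * boot_flags_init() below (the kernel code uses simple_strtoul; this
+ * sketch uses strtoul from stdlib.h so it can run on its own):
+ *
+ *	unsigned long parse_mem(const char *s)	// s points past "mem="
+ *	{
+ *		char *end;
+ *		unsigned long sz = strtoul(s, &end, 0);
+ *
+ *		if (*end == 'K' || *end == 'k')
+ *			sz <<= 10;
+ *		else if (*end == 'M' || *end == 'm')
+ *			sz <<= 20;
+ *		return sz;	// parse_mem("16M") == 16777216
+ *	}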
+ */ +static void __init process_switch(char c) +{ + switch (c) { + case 'd': + boot_flags |= BOOTME_DEBUG; + break; + case 's': + break; + case 'h': + prom_printf("boot_flags_init: Halt!\n"); + prom_halt(); + break; + case 'p': + prom_early_console.flags &= ~CON_BOOT; + break; + default: + printk("Unknown boot switch (-%c)\n", c); + break; + } +} + +static void __init boot_flags_init(char *commands) +{ + while (*commands) { + /* Move to the start of the next "argument". */ + while (*commands == ' ') + commands++; + + /* Process any command switches, otherwise skip it. */ + if (*commands == '\0') + break; + if (*commands == '-') { + commands++; + while (*commands && *commands != ' ') + process_switch(*commands++); + continue; + } + if (!strncmp(commands, "mem=", 4)) { + /* + * "mem=XXX[kKmM] overrides the PROM-reported + * memory size. + */ + cmdline_memory_size = simple_strtoul(commands + 4, + &commands, 0); + if (*commands == 'K' || *commands == 'k') { + cmdline_memory_size <<= 10; + commands++; + } else if (*commands=='M' || *commands=='m') { + cmdline_memory_size <<= 20; + commands++; + } + } + while (*commands && *commands != ' ') + commands++; + } +} + +extern unsigned short root_flags; +extern unsigned short root_dev; +extern unsigned short ram_flags; +#define RAMDISK_IMAGE_START_MASK 0x07FF +#define RAMDISK_PROMPT_FLAG 0x8000 +#define RAMDISK_LOAD_FLAG 0x4000 + +extern int root_mountflags; + +char reboot_command[COMMAND_LINE_SIZE]; + +struct cpuid_patch_entry { + unsigned int addr; + unsigned int sun4d[3]; + unsigned int leon[3]; +}; +extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; + +static void __init per_cpu_patch(void) +{ + struct cpuid_patch_entry *p; + + if (sparc_cpu_model == sun4m) { + /* Nothing to do, this is what the unpatched code + * targets. + */ + return; + } + + p = &__cpuid_patch; + while (p < &__cpuid_patch_end) { + unsigned long addr = p->addr; + unsigned int *insns; + + switch (sparc_cpu_model) { + case sun4d: + insns = &p->sun4d[0]; + break; + + case sparc_leon: + insns = &p->leon[0]; + break; + default: + prom_printf("Unknown cpu type, halting.\n"); + prom_halt(); + } + *(unsigned int *) (addr + 0) = insns[0]; + flushi(addr + 0); + *(unsigned int *) (addr + 4) = insns[1]; + flushi(addr + 4); + *(unsigned int *) (addr + 8) = insns[2]; + flushi(addr + 8); + + p++; + } +} + +struct leon_1insn_patch_entry { + unsigned int addr; + unsigned int insn; +}; + +enum sparc_cpu sparc_cpu_model; +EXPORT_SYMBOL(sparc_cpu_model); + +static __init void leon_patch(void) +{ + struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch; + struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end; + + /* Default instruction is leon - no patching */ + if (sparc_cpu_model == sparc_leon) + return; + + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *)(addr) = start->insn; + flushi(addr); + + start++; + } +} + +struct tt_entry *sparc_ttable; + +/* Called from head_32.S - before we have setup anything + * in the kernel. Be very careful with what you do here. 
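+ *
+ * (Hedged aside: the instruction fixups run from here, leon_patch()
+ * and per_cpu_patch() above, reduce to one two-step primitive, shown
+ * with an invented name:
+ *
+ *	static void patch_one_insn(unsigned long addr, unsigned int insn)
+ *	{
+ *		*(unsigned int *)addr = insn;	// overwrite kernel text
+ *		flushi(addr);			// flush stale I-cache word
+ *	}
+ *
+ * applied once per 32-bit instruction slot in each patch entry.)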
+ */ +void __init sparc32_start_kernel(struct linux_romvec *rp) +{ + prom_init(rp); + + /* Set sparc_cpu_model */ + sparc_cpu_model = sun_unknown; + if (!strcmp(&cputypval[0], "sun4m")) + sparc_cpu_model = sun4m; + if (!strcmp(&cputypval[0], "sun4s")) + sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */ + if (!strcmp(&cputypval[0], "sun4d")) + sparc_cpu_model = sun4d; + if (!strcmp(&cputypval[0], "sun4e")) + sparc_cpu_model = sun4e; + if (!strcmp(&cputypval[0], "sun4u")) + sparc_cpu_model = sun4u; + if (!strncmp(&cputypval[0], "leon" , 4)) + sparc_cpu_model = sparc_leon; + + leon_patch(); + start_kernel(); +} + +void __init setup_arch(char **cmdline_p) +{ + int i; + unsigned long highest_paddr; + + sparc_ttable = &trapbase; + + /* Initialize PROM console and command line. */ + *cmdline_p = prom_getbootargs(); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + parse_early_param(); + + boot_flags_init(*cmdline_p); + + register_console(&prom_early_console); + + switch(sparc_cpu_model) { + case sun4m: + pr_info("ARCH: SUN4M\n"); + break; + case sun4d: + pr_info("ARCH: SUN4D\n"); + break; + case sun4e: + pr_info("ARCH: SUN4E\n"); + break; + case sun4u: + pr_info("ARCH: SUN4U\n"); + break; + case sparc_leon: + pr_info("ARCH: LEON\n"); + break; + default: + pr_info("ARCH: UNKNOWN!\n"); + break; + } + + idprom_init(); + load_mmu(); + + phys_base = 0xffffffffUL; + highest_paddr = 0UL; + for (i = 0; sp_banks[i].num_bytes != 0; i++) { + unsigned long top; + + if (sp_banks[i].base_addr < phys_base) + phys_base = sp_banks[i].base_addr; + top = sp_banks[i].base_addr + + sp_banks[i].num_bytes; + if (highest_paddr < top) + highest_paddr = top; + } + pfn_base = phys_base >> PAGE_SHIFT; + + if (!root_flags) + root_mountflags &= ~MS_RDONLY; + ROOT_DEV = old_decode_dev(root_dev); +#ifdef CONFIG_BLK_DEV_RAM + rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; +#endif + + prom_setsync(prom_sync_me); + + if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) && + ((*(short *)linux_dbvec) != -1)) { + printk("Booted under KADB. Syncing trap table.\n"); + (*(linux_dbvec->teach_debugger))(); + } + + /* Run-time patch instructions to match the cpu model */ + per_cpu_patch(); + + paging_init(); + + smp_setup_cpu_possible_map(); +} + +extern int stop_a_enabled; + +void sun_do_break(void) +{ + if (!stop_a_enabled) + return; + + printk("\n"); + flush_user_windows(); + + prom_cmdline(); +} +EXPORT_SYMBOL(sun_do_break); + +int stop_a_enabled = 1; + +static int __init topology_init(void) +{ + int i, ncpus, err; + + /* Count the number of physically present processors in + * the machine, even on uniprocessor, so that /proc/cpuinfo + * output is consistent with 2.4.x + */ + ncpus = 0; + while (!cpu_find_by_instance(ncpus, NULL, NULL)) + ncpus++; + ncpus_probed = ncpus; + + err = 0; + for_each_online_cpu(i) { + struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + err = -ENOMEM; + else + register_cpu(p, i); + } + + return err; +} + +subsys_initcall(topology_init); + +#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) +void __init arch_cpu_finalize_init(void) +{ + cpu_data(0).udelay_val = loops_per_jiffy; +} +#endif diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c new file mode 100644 index 000000000..48abee4ee --- /dev/null +++ b/arch/sparc/kernel/setup_64.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sparc64/kernel/setup.c + * + * Copyright (C) 1995,1996 David S. 
Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IP_PNP +#include +#endif + +#include "entry.h" +#include "kernel.h" + +/* Used to synchronize accesses to NatSemi SUPER I/O chip configure + * operations in asm/ns87303.h + */ +DEFINE_SPINLOCK(ns87303_lock); +EXPORT_SYMBOL(ns87303_lock); + +struct screen_info screen_info = { + 0, 0, /* orig-x, orig-y */ + 0, /* unused */ + 0, /* orig-video-page */ + 0, /* orig-video-mode */ + 128, /* orig-video-cols */ + 0, 0, 0, /* unused, ega_bx, unused */ + 54, /* orig-video-lines */ + 0, /* orig-video-isVGA */ + 16 /* orig-video-points */ +}; + +static void +prom_console_write(struct console *con, const char *s, unsigned int n) +{ + prom_write(s, n); +} + +/* Exported for mm/init.c:paging_init. */ +unsigned long cmdline_memory_size = 0; + +static struct console prom_early_console = { + .name = "earlyprom", + .write = prom_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, + .index = -1, +}; + +/* + * Process kernel command line switches that are specific to the + * SPARC or that require special low-level processing. + */ +static void __init process_switch(char c) +{ + switch (c) { + case 'd': + case 's': + break; + case 'h': + prom_printf("boot_flags_init: Halt!\n"); + prom_halt(); + break; + case 'p': + prom_early_console.flags &= ~CON_BOOT; + break; + case 'P': + /* Force UltraSPARC-III P-Cache on. */ + if (tlb_type != cheetah) { + printk("BOOT: Ignoring P-Cache force option.\n"); + break; + } + cheetah_pcache_forced_on = 1; + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); + cheetah_enable_pcache(); + break; + + default: + printk("Unknown boot switch (-%c)\n", c); + break; + } +} + +static void __init boot_flags_init(char *commands) +{ + while (*commands) { + /* Move to the start of the next "argument". */ + while (*commands == ' ') + commands++; + + /* Process any command switches, otherwise skip it. 
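+ *
+ * (Hedged aside: unlike the 32-bit variant, which open-codes the
+ * k/M suffix handling, this copy defers "mem=" parsing to memparse(),
+ * which also understands G and larger suffixes, e.g.
+ *
+ *	cmdline_memory_size = memparse("512M", &commands);  // 536870912
+ * )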
*/ + if (*commands == '\0') + break; + if (*commands == '-') { + commands++; + while (*commands && *commands != ' ') + process_switch(*commands++); + continue; + } + if (!strncmp(commands, "mem=", 4)) + cmdline_memory_size = memparse(commands + 4, &commands); + + while (*commands && *commands != ' ') + commands++; + } +} + +extern unsigned short root_flags; +extern unsigned short root_dev; +extern unsigned short ram_flags; +#define RAMDISK_IMAGE_START_MASK 0x07FF +#define RAMDISK_PROMPT_FLAG 0x8000 +#define RAMDISK_LOAD_FLAG 0x4000 + +extern int root_mountflags; + +char reboot_command[COMMAND_LINE_SIZE]; + +static void __init per_cpu_patch(void) +{ + struct cpuid_patch_entry *p; + unsigned long ver; + int is_jbus; + + if (tlb_type == spitfire && !this_is_starfire) + return; + + is_jbus = 0; + if (tlb_type != hypervisor) { + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + is_jbus = ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID); + } + + p = &__cpuid_patch; + while (p < &__cpuid_patch_end) { + unsigned long addr = p->addr; + unsigned int *insns; + + switch (tlb_type) { + case spitfire: + insns = &p->starfire[0]; + break; + case cheetah: + case cheetah_plus: + if (is_jbus) + insns = &p->cheetah_jbus[0]; + else + insns = &p->cheetah_safari[0]; + break; + case hypervisor: + insns = &p->sun4v[0]; + break; + default: + prom_printf("Unknown cpu type, halting.\n"); + prom_halt(); + } + + *(unsigned int *) (addr + 0) = insns[0]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = insns[1]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + *(unsigned int *) (addr + 8) = insns[2]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 8)); + + *(unsigned int *) (addr + 12) = insns[3]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 12)); + + p++; + } +} + +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start, + struct sun4v_1insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *) (addr + 0) = start->insn; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + start++; + } +} + +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *) (addr + 0) = start->insns[0]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = start->insns[1]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + start++; + } +} + +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *) (addr + 0) = start->insns[0]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = start->insns[1]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + start++; + } +} + +static void __init sun4v_patch(void) +{ + extern void sun4v_hvapi_init(void); + + if (tlb_type != hypervisor) + return; + + sun4v_patch_1insn_range(&__sun4v_1insn_patch, + &__sun4v_1insn_patch_end); + + sun4v_patch_2insn_range(&__sun4v_2insn_patch, + &__sun4v_2insn_patch_end); + + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: + case SUN4V_CHIP_SPARC_SN: + sun4v_patch_1insn_range(&__sun_m7_1insn_patch, + &__sun_m7_1insn_patch_end); + sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, + 
&__sun_m7_2insn_patch_end); + break; + default: + break; + } + + if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) { + sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch, + &__fast_win_ctrl_1insn_patch_end); + } + + sun4v_hvapi_init(); +} + +static void __init popc_patch(void) +{ + struct popc_3insn_patch_entry *p3; + struct popc_6insn_patch_entry *p6; + + p3 = &__popc_3insn_patch; + while (p3 < &__popc_3insn_patch_end) { + unsigned long i, addr = p3->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p3->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p3++; + } + + p6 = &__popc_6insn_patch; + while (p6 < &__popc_6insn_patch_end) { + unsigned long i, addr = p6->addr; + + for (i = 0; i < 6; i++) { + *(unsigned int *) (addr + (i * 4)) = p6->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p6++; + } +} + +static void __init pause_patch(void) +{ + struct pause_patch_entry *p; + + p = &__pause_3insn_patch; + while (p < &__pause_3insn_patch_end) { + unsigned long i, addr = p->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p++; + } +} + +void __init start_early_boot(void) +{ + int cpu; + + check_if_starfire(); + per_cpu_patch(); + sun4v_patch(); + smp_init_cpu_poke(); + + cpu = hard_smp_processor_id(); + if (cpu >= NR_CPUS) { + prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", + cpu, NR_CPUS); + prom_halt(); + } + current_thread_info()->cpu = cpu; + + time_init_early(); + prom_init_report(); + start_kernel(); +} + +/* On Ultra, we support all of the v8 capabilities. */ +unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | + HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | + HWCAP_SPARC_V9); +EXPORT_SYMBOL(sparc64_elf_hwcap); + +static const char *hwcaps[] = { + "flush", "stbar", "swap", "muldiv", "v9", + "ultra3", "blkinit", "n2", + + /* These strings are as they appear in the machine description + * 'hwcap-list' property for cpu nodes. + */ + "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", + "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", + "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */, + "adp", +}; + +static const char *crypto_hwcaps[] = { + "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256", + "sha512", "mpmul", "montmul", "montsqr", "crc32c", +}; + +void cpucap_info(struct seq_file *m) +{ + unsigned long caps = sparc64_elf_hwcap; + int i, printed = 0; + + seq_puts(m, "cpucaps\t\t: "); + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (hwcaps[i] && (caps & bit)) { + seq_printf(m, "%s%s", + printed ? "," : "", hwcaps[i]); + printed++; + } + } + if (caps & HWCAP_SPARC_CRYPTO) { + unsigned long cfr; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + unsigned long bit = 1UL << i; + if (cfr & bit) { + seq_printf(m, "%s%s", + printed ? "," : "", crypto_hwcaps[i]); + printed++; + } + } + } + seq_putc(m, '\n'); +} + +static void __init report_one_hwcap(int *printed, const char *name) +{ + if ((*printed) == 0) + printk(KERN_INFO "CPU CAPS: ["); + printk(KERN_CONT "%s%s", + (*printed) ? 
"," : "", name); + if (++(*printed) == 8) { + printk(KERN_CONT "]\n"); + *printed = 0; + } +} + +static void __init report_crypto_hwcaps(int *printed) +{ + unsigned long cfr; + int i; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + unsigned long bit = 1UL << i; + if (cfr & bit) + report_one_hwcap(printed, crypto_hwcaps[i]); + } +} + +static void __init report_hwcaps(unsigned long caps) +{ + int i, printed = 0; + + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (hwcaps[i] && (caps & bit)) + report_one_hwcap(&printed, hwcaps[i]); + } + if (caps & HWCAP_SPARC_CRYPTO) + report_crypto_hwcaps(&printed); + if (printed != 0) + printk(KERN_CONT "]\n"); +} + +static unsigned long __init mdesc_cpu_hwcap_list(void) +{ + struct mdesc_handle *hp; + unsigned long caps = 0; + const char *prop; + int len; + u64 pn; + + hp = mdesc_grab(); + if (!hp) + return 0; + + pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu"); + if (pn == MDESC_NODE_NULL) + goto out; + + prop = mdesc_get_property(hp, pn, "hwcap-list", &len); + if (!prop) + goto out; + + while (len) { + int i, plen; + + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + + if (hwcaps[i] && !strcmp(prop, hwcaps[i])) { + caps |= bit; + break; + } + } + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + if (!strcmp(prop, crypto_hwcaps[i])) + caps |= HWCAP_SPARC_CRYPTO; + } + + plen = strlen(prop) + 1; + prop += plen; + len -= plen; + } + +out: + mdesc_release(hp); + return caps; +} + +/* This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. + */ +static void __init init_sparc64_elf_hwcap(void) +{ + unsigned long cap = sparc64_elf_hwcap; + unsigned long mdesc_caps; + + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= HWCAP_SPARC_ULTRA3; + else if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || + sun4v_chip_type == SUN4V_CHIP_SPARC_SN || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= HWCAP_SPARC_BLKINIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || + sun4v_chip_type == SUN4V_CHIP_SPARC_SN || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= HWCAP_SPARC_N2; + } + + cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS); + + mdesc_caps = mdesc_cpu_hwcap_list(); + if (!mdesc_caps) { + if (tlb_type == spitfire) + cap |= AV_SPARC_VIS; + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= AV_SPARC_VIS | AV_SPARC_VIS2; + if (tlb_type == cheetah_plus) { + unsigned long impl, ver; + + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); + impl = ((ver >> 32) & 0xffff); + if (impl == PANTHER_IMPL) + cap |= AV_SPARC_POPC; + } + if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) + cap |= AV_SPARC_ASI_BLK_INIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + 
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || + sun4v_chip_type == SUN4V_CHIP_SPARC_SN || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | + AV_SPARC_ASI_BLK_INIT | + AV_SPARC_POPC); + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || + sun4v_chip_type == SUN4V_CHIP_SPARC_SN || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | + AV_SPARC_FMAF); + } + } + sparc64_elf_hwcap = cap | mdesc_caps; + + report_hwcaps(sparc64_elf_hwcap); + + if (sparc64_elf_hwcap & AV_SPARC_POPC) + popc_patch(); + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) + pause_patch(); +} + +void __init alloc_irqstack_bootmem(void) +{ + unsigned int i, node; + + for_each_possible_cpu(i) { + node = cpu_to_node(i); + + softirq_stack[i] = memblock_alloc_node(THREAD_SIZE, + THREAD_SIZE, node); + if (!softirq_stack[i]) + panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n", + __func__, THREAD_SIZE, THREAD_SIZE, node); + hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE, + THREAD_SIZE, node); + if (!hardirq_stack[i]) + panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n", + __func__, THREAD_SIZE, THREAD_SIZE, node); + } +} + +void __init setup_arch(char **cmdline_p) +{ + /* Initialize PROM console and command line. */ + *cmdline_p = prom_getbootargs(); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + parse_early_param(); + + boot_flags_init(*cmdline_p); +#ifdef CONFIG_EARLYFB + if (btext_find_display()) +#endif + register_console(&prom_early_console); + + if (tlb_type == hypervisor) + pr_info("ARCH: SUN4V\n"); + else + pr_info("ARCH: SUN4U\n"); + + idprom_init(); + + if (!root_flags) + root_mountflags &= ~MS_RDONLY; + ROOT_DEV = old_decode_dev(root_dev); +#ifdef CONFIG_BLK_DEV_RAM + rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; +#endif + +#ifdef CONFIG_IP_PNP + if (!ic_set_manually) { + phandle chosen = prom_finddevice("/chosen"); + u32 cl, sv, gw; + + cl = prom_getintdefault (chosen, "client-ip", 0); + sv = prom_getintdefault (chosen, "server-ip", 0); + gw = prom_getintdefault (chosen, "gateway-ip", 0); + if (cl && sv) { + ic_myaddr = cl; + ic_servaddr = sv; + if (gw) + ic_gateway = gw; +#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP) + ic_proto_enabled = 0; +#endif + } + } +#endif + + /* Get boot processor trap_block[] setup. */ + init_cur_cpu_trap(current_thread_info()); + + paging_init(); + init_sparc64_elf_hwcap(); + smp_fill_in_cpu_possible_map(); + /* + * Once the OF device tree and MDESC have been setup and nr_cpus has + * been parsed, we know the list of possible cpus. Therefore we can + * allocate the IRQ stacks. 
+ */ + alloc_irqstack_bootmem(); +} + +extern int stop_a_enabled; + +void sun_do_break(void) +{ + if (!stop_a_enabled) + return; + + prom_printf("\n"); + flush_user_windows(); + + prom_cmdline(); +} +EXPORT_SYMBOL(sun_do_break); + +int stop_a_enabled = 1; +EXPORT_SYMBOL(stop_a_enabled); diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c new file mode 100644 index 000000000..dad38960d --- /dev/null +++ b/arch/sparc/kernel/signal32.c @@ -0,0 +1,784 @@ +// SPDX-License-Identifier: GPL-2.0 +/* arch/sparc64/kernel/signal32.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "sigutil.h" +#include "kernel.h" + +/* This magic should be in g_upper[0] for all upper parts + * to be valid. + */ +#define SIGINFO_EXTRA_V8PLUS_MAGIC 0x130e269 +typedef struct { + unsigned int g_upper[8]; + unsigned int o_upper[8]; + unsigned int asi; +} siginfo_extra_v8plus_t; + +struct signal_frame32 { + struct sparc_stackf32 ss; + __siginfo32_t info; + /* __siginfo_fpu_t * */ u32 fpu_save; + unsigned int insns[2]; + unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; + unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ + /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ + siginfo_extra_v8plus_t v8plus; + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); + +struct rt_signal_frame32 { + struct sparc_stackf32 ss; + compat_siginfo_t info; + struct pt_regs32 regs; + compat_sigset_t mask; + /* __siginfo_fpu_t * */ u32 fpu_save; + unsigned int insns[2]; + compat_stack_t stack; + unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ + /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ + siginfo_extra_v8plus_t v8plus; + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); + +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || + ((unsigned long)fp) > 0x100000000ULL - fplen) + return true; + return false; +} + +void do_sigreturn32(struct pt_regs *regs) +{ + struct signal_frame32 __user *sf; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; + unsigned int psr, ufp; + unsigned int pc, npc; + sigset_t set; + compat_sigset_t seta; + int err, i; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + synchronize_user_stack(); + + regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; + sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; + + /* 1. 
Make sure we are not getting garbage from the user */ + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; + + if (__get_user(pc, &sf->info.si_regs.pc) || + __get_user(npc, &sf->info.si_regs.npc)) + goto segv; + + if ((pc | npc) & 3) + goto segv; + + if (test_thread_flag(TIF_32BIT)) { + pc &= 0xffffffff; + npc &= 0xffffffff; + } + regs->tpc = pc; + regs->tnpc = npc; + + /* 2. Restore the state */ + err = __get_user(regs->y, &sf->info.si_regs.y); + err |= __get_user(psr, &sf->info.si_regs.psr); + + for (i = UREG_G1; i <= UREG_I7; i++) + err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]); + if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) { + err |= __get_user(i, &sf->v8plus.g_upper[0]); + if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) { + unsigned long asi; + + for (i = UREG_G1; i <= UREG_I7; i++) + err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]); + err |= __get_user(asi, &sf->v8plus.asi); + regs->tstate &= ~TSTATE_ASI; + regs->tstate |= ((asi & 0xffUL) << 24UL); + } + } + + /* User can only change condition codes in %tstate. */ + regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC); + regs->tstate |= psr_to_tstate_icc(psr); + + /* Prevent syscall restart. */ + pt_regs_clear_syscall(regs); + + err |= __get_user(fpu_save, &sf->fpu_save); + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; + } + err |= __get_user(seta.sig[0], &sf->info.si_mask); + err |= copy_from_user(&seta.sig[1], &sf->extramask, + (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); + if (err) + goto segv; + + set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); + set_current_blocked(&set); + return; + +segv: + force_sig(SIGSEGV); +} + +asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) +{ + struct rt_signal_frame32 __user *sf; + unsigned int psr, pc, npc, ufp; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; + sigset_t set; + int err, i; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + synchronize_user_stack(); + regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; + sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; + + /* 1. Make sure we are not getting garbage from the user */ + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; + + if (__get_user(pc, &sf->regs.pc) || + __get_user(npc, &sf->regs.npc)) + goto segv; + + if ((pc | npc) & 3) + goto segv; + + if (test_thread_flag(TIF_32BIT)) { + pc &= 0xffffffff; + npc &= 0xffffffff; + } + regs->tpc = pc; + regs->tnpc = npc; + + /* 2. Restore the state */ + err = __get_user(regs->y, &sf->regs.y); + err |= __get_user(psr, &sf->regs.psr); + + for (i = UREG_G1; i <= UREG_I7; i++) + err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]); + if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) { + err |= __get_user(i, &sf->v8plus.g_upper[0]); + if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) { + unsigned long asi; + + for (i = UREG_G1; i <= UREG_I7; i++) + err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]); + err |= __get_user(asi, &sf->v8plus.asi); + regs->tstate &= ~TSTATE_ASI; + regs->tstate |= ((asi & 0xffUL) << 24UL); + } + } + + /* User can only change condition codes in %tstate. 
*/ + regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC); + regs->tstate |= psr_to_tstate_icc(psr); + + /* Prevent syscall restart. */ + pt_regs_clear_syscall(regs); + + err |= __get_user(fpu_save, &sf->fpu_save); + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); + err |= get_compat_sigset(&set, &sf->mask); + err |= compat_restore_altstack(&sf->stack); + if (err) + goto segv; + + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; + } + + set_current_blocked(&set); + return; +segv: + force_sig(SIGSEGV); +} + +static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) +{ + unsigned long sp; + + regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; + sp = regs->u_regs[UREG_FP]; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) + return (void __user *) -1L; + + /* This is the X/Open sanctioned signal stack switching. */ + sp = sigsp(sp, ksig) - framesize; + + /* Always align the stack frame. This handles two cases. First, + * sigaltstack need not be mindful of platform specific stack + * alignment. Second, if we took this signal because the stack + * is not aligned properly, we'd like to take the signal cleanly + * and report that. + */ + sp &= ~15UL; + + return (void __user *) sp; +} + +/* The I-cache flush instruction only works in the primary ASI, which + * right now is the nucleus, aka. kernel space. + * + * Therefore we have to kick the instructions out using the kernel + * side linear mapping of the physical address backing the user + * instructions. + */ +static void flush_signal_insns(unsigned long address) +{ + unsigned long pstate, paddr; + pte_t *ptep, pte; + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + + /* Commit all stores of the instructions we are about to flush. */ + wmb(); + + /* Disable cross-call reception. In this way even a very wide + * munmap() on another cpu can't tear down the page table + * hierarchy from underneath us, since that can't complete + * until the IPI tlb flush returns. + */ + + __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); + __asm__ __volatile__("wrpr %0, %1, %%pstate" + : : "r" (pstate), "i" (PSTATE_IE)); + + pgdp = pgd_offset(current->mm, address); + if (pgd_none(*pgdp)) + goto out_irqs_on; + p4dp = p4d_offset(pgdp, address); + if (p4d_none(*p4dp)) + goto out_irqs_on; + pudp = pud_offset(p4dp, address); + if (pud_none(*pudp)) + goto out_irqs_on; + pmdp = pmd_offset(pudp, address); + if (pmd_none(*pmdp)) + goto out_irqs_on; + + ptep = pte_offset_map(pmdp, address); + pte = *ptep; + if (!pte_present(pte)) + goto out_unmap; + + paddr = (unsigned long) page_address(pte_page(pte)); + + __asm__ __volatile__("flush %0 + %1" + : /* no outputs */ + : "r" (paddr), + "r" (address & (PAGE_SIZE - 1)) + : "memory"); + +out_unmap: + pte_unmap(ptep); +out_irqs_on: + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); + +} + +static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, + sigset_t *oldset) +{ + struct signal_frame32 __user *sf; + int i, err, wsaved; + void __user *tail; + int sigframe_size; + u32 psr; + compat_sigset_t seta; + + /* 1. 
Make sure everything is clean */
+ synchronize_user_stack();
+ save_and_clear_fpu();
+
+ wsaved = get_thread_wsaved();
+
+ sigframe_size = sizeof(*sf);
+ if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+ sigframe_size += sizeof(__siginfo_fpu_t);
+ if (wsaved)
+ sigframe_size += sizeof(__siginfo_rwin_t);
+
+ sf = (struct signal_frame32 __user *)
+ get_sigframe(ksig, regs, sigframe_size);
+
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig);
+ return -EINVAL;
+ }
+
+ tail = (sf + 1);
+
+ /* 2. Save the current process state */
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ err = put_user(regs->tpc, &sf->info.si_regs.pc);
+ err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
+ err |= __put_user(regs->y, &sf->info.si_regs.y);
+ psr = tstate_to_psr(regs->tstate);
+ if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+ psr |= PSR_EF;
+ err |= __put_user(psr, &sf->info.si_regs.psr);
+ for (i = 0; i < 16; i++)
+ err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+ err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+ err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+ for (i = 1; i < 16; i++)
+ err |= __put_user(((u32 *)regs->u_regs)[2*i],
+ &sf->v8plus.g_upper[i]);
+ err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+ &sf->v8plus.asi);
+
+ if (psr & PSR_EF) {
+ __siginfo_fpu_t __user *fp = tail;
+ tail += sizeof(*fp);
+ err |= save_fpu_state(regs, fp);
+ err |= __put_user((u64)fp, &sf->fpu_save);
+ } else {
+ err |= __put_user(0, &sf->fpu_save);
+ }
+ if (wsaved) {
+ __siginfo_rwin_t __user *rwp = tail;
+ tail += sizeof(*rwp);
+ err |= save_rwin_state(wsaved, rwp);
+ err |= __put_user((u64)rwp, &sf->rwin_save);
+ set_thread_wsaved(0);
+ } else {
+ err |= __put_user(0, &sf->rwin_save);
+ }
+
+ /* If these change we need to know - assignments to seta rely on these sizes */
+ BUILD_BUG_ON(_NSIG_WORDS != 1);
+ BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
+ seta.sig[1] = (oldset->sig[0] >> 32);
+ seta.sig[0] = oldset->sig[0];
+
+ err |= __put_user(seta.sig[0], &sf->info.si_mask);
+ err |= __copy_to_user(sf->extramask, &seta.sig[1],
+ (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+
+ if (!wsaved) {
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
+ } else {
+ struct reg_window *rp;
+
+ rp = &current_thread_info()->reg_window[wsaved - 1];
+ for (i = 0; i < 8; i++)
+ err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
+ for (i = 0; i < 6; i++)
+ err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
+ err |= __put_user(rp->ins[6], &sf->ss.fp);
+ err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
+ }
+ if (err)
+ return err;
+
+ /* 3. signal handler back-trampoline and parameters */
+ regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_I0] = ksig->sig;
+ regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+ regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+ /* 4. signal handler */
+ regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
+ regs->tnpc = (regs->tpc + 4);
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+
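+ /* When ka_restorer is absent, the handler returns through the
+ * two-instruction trampoline written at sf->insns below; %i7 is
+ * biased by two instructions (8 bytes) because the handler's
+ * "ret" jumps to %i7 + 8.
+ */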
+ /* 5. return to kernel instructions */
+ if (ksig->ka.ka_restorer) {
+ regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
+ } else {
+ unsigned long address = ((unsigned long)&(sf->insns[0]));
+
+ regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+
+ err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
+ err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
+ if (err)
+ return err;
+ flush_signal_insns(address);
+ }
+ return 0;
+}
+
+static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
+ sigset_t *oldset)
+{
+ struct rt_signal_frame32 __user *sf;
+ int i, err, wsaved;
+ void __user *tail;
+ int sigframe_size;
+ u32 psr;
+
+ /* 1. Make sure everything is clean */
+ synchronize_user_stack();
+ save_and_clear_fpu();
+
+ wsaved = get_thread_wsaved();
+
+ sigframe_size = sizeof(*sf);
+ if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+ sigframe_size += sizeof(__siginfo_fpu_t);
+ if (wsaved)
+ sigframe_size += sizeof(__siginfo_rwin_t);
+
+ sf = (struct rt_signal_frame32 __user *)
+ get_sigframe(ksig, regs, sigframe_size);
+
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig);
+ return -EINVAL;
+ }
+
+ tail = (sf + 1);
+
+ /* 2. Save the current process state */
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ err = put_user(regs->tpc, &sf->regs.pc);
+ err |= __put_user(regs->tnpc, &sf->regs.npc);
+ err |= __put_user(regs->y, &sf->regs.y);
+ psr = tstate_to_psr(regs->tstate);
+ if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+ psr |= PSR_EF;
+ err |= __put_user(psr, &sf->regs.psr);
+ for (i = 0; i < 16; i++)
+ err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+ err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+ err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+ for (i = 1; i < 16; i++)
+ err |= __put_user(((u32 *)regs->u_regs)[2*i],
+ &sf->v8plus.g_upper[i]);
+ err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+ &sf->v8plus.asi);
+
+ if (psr & PSR_EF) {
+ __siginfo_fpu_t __user *fp = tail;
+ tail += sizeof(*fp);
+ err |= save_fpu_state(regs, fp);
+ err |= __put_user((u64)fp, &sf->fpu_save);
+ } else {
+ err |= __put_user(0, &sf->fpu_save);
+ }
+ if (wsaved) {
+ __siginfo_rwin_t __user *rwp = tail;
+ tail += sizeof(*rwp);
+ err |= save_rwin_state(wsaved, rwp);
+ err |= __put_user((u64)rwp, &sf->rwin_save);
+ set_thread_wsaved(0);
+ } else {
+ err |= __put_user(0, &sf->rwin_save);
+ }
+
+ /* Update the siginfo structure. */
+ err |= copy_siginfo_to_user32(&sf->info, &ksig->info);
+
+ /* Setup sigaltstack */
+ err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
+
+ err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t));
+
+ if (!wsaved) {
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
+ } else {
+ struct reg_window *rp;
+
+ rp = &current_thread_info()->reg_window[wsaved - 1];
+ for (i = 0; i < 8; i++)
+ err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
+ for (i = 0; i < 6; i++)
+ err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
+ err |= __put_user(rp->ins[6], &sf->ss.fp);
+ err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
+ }
+ if (err)
+ return err;
+
+ /* 3.
signal handler back-trampoline and parameters */ + regs->u_regs[UREG_FP] = (unsigned long) sf; + regs->u_regs[UREG_I0] = ksig->sig; + regs->u_regs[UREG_I1] = (unsigned long) &sf->info; + regs->u_regs[UREG_I2] = (unsigned long) &sf->regs; + + /* 4. signal handler */ + regs->tpc = (unsigned long) ksig->ka.sa.sa_handler; + regs->tnpc = (regs->tpc + 4); + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + + /* 5. return to kernel instructions */ + if (ksig->ka.ka_restorer) + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; + else { + unsigned long address = ((unsigned long)&(sf->insns[0])); + + regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); + + /* mov __NR_rt_sigreturn, %g1 */ + err |= __put_user(0x82102065, &sf->insns[0]); + + /* t 0x10 */ + err |= __put_user(0x91d02010, &sf->insns[1]); + if (err) + return err; + + flush_signal_insns(address); + } + return 0; +} + +static inline void handle_signal32(struct ksignal *ksig, + struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int err; + + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err = setup_rt_frame32(ksig, regs, oldset); + else + err = setup_frame32(ksig, regs, oldset); + + signal_setup_done(err, ksig, 0); +} + +static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, + struct sigaction *sa) +{ + switch (regs->u_regs[UREG_I0]) { + case ERESTART_RESTARTBLOCK: + case ERESTARTNOHAND: + no_system_call_restart: + regs->u_regs[UREG_I0] = EINTR; + regs->tstate |= TSTATE_ICARRY; + break; + case ERESTARTSYS: + if (!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + fallthrough; + case ERESTARTNOINTR: + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + } +} + +/* Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + */ +void do_signal32(struct pt_regs * regs) +{ + struct ksignal ksig; + unsigned long orig_i0 = 0; + int restart_syscall = 0; + bool has_handler = get_signal(&ksig); + + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } + + if (has_handler) { + if (restart_syscall) + syscall_restart32(orig_i0, regs, &ksig.ka.sa); + handle_signal32(&ksig, regs); + } else { + if (restart_syscall) { + switch (regs->u_regs[UREG_I0]) { + case ERESTARTNOHAND: + case ERESTARTSYS: + case ERESTARTNOINTR: + /* replay the system call when we are done */ + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + fallthrough; + case ERESTART_RESTARTBLOCK: + regs->u_regs[UREG_G1] = __NR_restart_syscall; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + } + } + restore_saved_sigmask(); + } +} + +struct sigstack32 { + u32 the_stack; + int cur_status; +}; + +asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp) +{ + struct sigstack32 __user *ssptr = + (struct sigstack32 __user *)((unsigned long)(u_ssptr)); + struct sigstack32 __user *ossptr = + (struct sigstack32 __user *)((unsigned long)(u_ossptr)); + int ret = -EFAULT; + + /* First see if old state is wanted. */ + if (ossptr) { + if (put_user(current->sas_ss_sp + current->sas_ss_size, + &ossptr->the_stack) || + __put_user(on_sig_stack(sp), &ossptr->cur_status)) + goto out; + } + + /* Now see if we want to update the new state. 
*/ + if (ssptr) { + u32 ss_sp; + + if (get_user(ss_sp, &ssptr->the_stack)) + goto out; + + /* If the current stack was set with sigaltstack, don't + * swap stacks while we are on it. + */ + ret = -EPERM; + if (current->sas_ss_sp && on_sig_stack(sp)) + goto out; + + /* Since we don't know the extent of the stack, and we don't + * track onstack-ness, but rather calculate it, we must + * presume a size. Ho hum this interface is lossy. + */ + current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; + current->sas_ss_size = SIGSTKSZ; + } + + ret = 0; +out: + return ret; +} + +/* + * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as + * changes likely come with new fields that should be added below. + */ +static_assert(NSIGILL == 11); +static_assert(NSIGFPE == 15); +static_assert(NSIGSEGV == 9); +static_assert(NSIGBUS == 5); +static_assert(NSIGTRAP == 6); +static_assert(NSIGCHLD == 6); +static_assert(NSIGSYS == 2); +static_assert(sizeof(compat_siginfo_t) == 128); +static_assert(__alignof__(compat_siginfo_t) == 4); +static_assert(offsetof(compat_siginfo_t, si_signo) == 0x00); +static_assert(offsetof(compat_siginfo_t, si_errno) == 0x04); +static_assert(offsetof(compat_siginfo_t, si_code) == 0x08); +static_assert(offsetof(compat_siginfo_t, si_pid) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_uid) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_tid) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_overrun) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_status) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_utime) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_stime) == 0x1c); +static_assert(offsetof(compat_siginfo_t, si_value) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_int) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_ptr) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_addr) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_trapno) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_addr_lsb) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_lower) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10); diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c new file mode 100644 index 000000000..89b93c713 --- /dev/null +++ b/arch/sparc/kernel/signal_32.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* linux/arch/sparc/kernel/signal.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1997 Eddie C. 
Dost (ecd@skynet.be)
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include /* do_coredump */
+#include
+#include
+
+#include
+#include
+#include /* flush_sig_insns */
+#include
+
+#include "sigutil.h"
+#include "kernel.h"
+
+extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ void *fpqueue, unsigned long *fpqdepth);
+extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+
+struct signal_frame {
+ struct sparc_stackf ss;
+ __siginfo32_t info;
+ __siginfo_fpu_t __user *fpu_save;
+ unsigned long insns[2] __attribute__ ((aligned (8)));
+ unsigned int extramask[_NSIG_WORDS - 1];
+ unsigned int extra_size; /* Should be 0 */
+ __siginfo_rwin_t __user *rwin_save;
+} __attribute__((aligned(8)));
+
+struct rt_signal_frame {
+ struct sparc_stackf ss;
+ siginfo_t info;
+ struct pt_regs regs;
+ sigset_t mask;
+ __siginfo_fpu_t __user *fpu_save;
+ unsigned int insns[2];
+ stack_t stack;
+ unsigned int extra_size; /* Should be 0 */
+ __siginfo_rwin_t __user *rwin_save;
+} __attribute__((aligned(8)));
+
+/* Align macros */
+#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
+#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen))
+ return true;
+
+ return false;
+}
+
+asmlinkage void do_sigreturn(struct pt_regs *regs)
+{
+ unsigned long up_psr, pc, npc, ufp;
+ struct signal_frame __user *sf;
+ sigset_t set;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ int err;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ synchronize_user_stack();
+
+ sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv_and_exit;
+
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv_and_exit;
+
+ if (ufp & 0x7)
+ goto segv_and_exit;
+
+ err = __get_user(pc, &sf->info.si_regs.pc);
+ err |= __get_user(npc, &sf->info.si_regs.npc);
+
+ if ((pc | npc) & 3)
+ goto segv_and_exit;
+
+ /* 2. Restore the state */
+ up_psr = regs->psr;
+ err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));
+
+ /* User can only change condition codes and FPU enabling in %psr. */
+ regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
+ | (regs->psr & (PSR_ICC | PSR_EF));
+
+ /* Prevent syscall restart. */
+ pt_regs_clear_syscall(regs);
+
+ err |= __get_user(fpu_save, &sf->fpu_save);
+ if (fpu_save)
+ err |= restore_fpu_state(regs, fpu_save);
+ err |= __get_user(rwin_save, &sf->rwin_save);
+ if (rwin_save)
+ err |= restore_rwin_state(rwin_save);
+
+ /* This is pretty much atomic; no amount of locking would prevent
+ * the races which exist anyway.
+ */
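+ /* Layout note: word 0 of the blocked mask lives in si_mask inside
+ * the in-frame siginfo, while the remaining words sit in extramask[],
+ * preserving the historical sigframe layout.
+ */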
+ err |= __get_user(set.sig[0], &sf->info.si_mask);
+ err |= __copy_from_user(&set.sig[1], &sf->extramask,
+ (_NSIG_WORDS-1) * sizeof(unsigned int));
+
+ if (err)
+ goto segv_and_exit;
+
+ set_current_blocked(&set);
+ return;
+
+segv_and_exit:
+ force_sig(SIGSEGV);
+}
+
+asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_signal_frame __user *sf;
+ unsigned int psr, pc, npc, ufp;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ sigset_t set;
+ int err;
+
+ synchronize_user_stack();
+ sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
+ goto segv;
+
+ err = __get_user(pc, &sf->regs.pc);
+ err |= __get_user(npc, &sf->regs.npc);
+ err |= ((pc | npc) & 0x03);
+
+ err |= __get_user(regs->y, &sf->regs.y);
+ err |= __get_user(psr, &sf->regs.psr);
+
+ err |= __copy_from_user(&regs->u_regs[UREG_G1],
+ &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));
+
+ regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);
+
+ /* Prevent syscall restart. */
+ pt_regs_clear_syscall(regs);
+
+ err |= __get_user(fpu_save, &sf->fpu_save);
+ if (!err && fpu_save)
+ err |= restore_fpu_state(regs, fpu_save);
+ err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
+ err |= restore_altstack(&sf->stack);
+
+ if (err)
+ goto segv;
+
+ regs->pc = pc;
+ regs->npc = npc;
+
+ err |= __get_user(rwin_save, &sf->rwin_save);
+ if (!err && rwin_save) {
+ if (restore_rwin_state(rwin_save))
+ goto segv;
+ }
+
+ set_current_blocked(&set);
+ return;
+segv:
+ force_sig(SIGSEGV);
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+{
+ unsigned long sp = regs->u_regs[UREG_FP];
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+ return (void __user *) -1L;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
+
+ /* Always align the stack frame. This handles two cases. First,
+ * sigaltstack need not be mindful of platform specific stack
+ * alignment. Second, if we took this signal because the stack
+ * is not aligned properly, we'd like to take the signal cleanly
+ * and report that.
+ */
+ sp &= ~15UL;
+
+ return (void __user *) sp;
+}
+
+static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
+ sigset_t *oldset)
+{
+ struct signal_frame __user *sf;
+ int sigframe_size, err, wsaved;
+ void __user *tail;
+
+ /* 1. Make sure everything is clean */
+ synchronize_user_stack();
+
+ wsaved = current_thread_info()->w_saved;
+
+ sigframe_size = sizeof(*sf);
+ if (used_math())
+ sigframe_size += sizeof(__siginfo_fpu_t);
+ if (wsaved)
+ sigframe_size += sizeof(__siginfo_rwin_t);
+
+ sf = (struct signal_frame __user *)
+ get_sigframe(ksig, regs, sigframe_size);
+
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+ force_exit_sig(SIGILL);
+ return -EINVAL;
+ }
+
+ tail = sf + 1;
+
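+ /* 'tail' points just past the fixed-size frame; the optional FPU
+ * and register-window dumps below are appended there and linked in
+ * through fpu_save/rwin_save.
+ */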
+ /* 2. Save the current process state */
+ err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));
+
+ err |= __put_user(0, &sf->extra_size);
+
+ if (used_math()) {
+ __siginfo_fpu_t __user *fp = tail;
+ tail += sizeof(*fp);
+ err |= save_fpu_state(regs, fp);
+ err |= __put_user(fp, &sf->fpu_save);
+ } else {
+ err |= __put_user(0, &sf->fpu_save);
+ }
+ if (wsaved) {
+ __siginfo_rwin_t __user *rwp = tail;
+ tail += sizeof(*rwp);
+ err |= save_rwin_state(wsaved, rwp);
+ err |= __put_user(rwp, &sf->rwin_save);
+ } else {
+ err |= __put_user(0, &sf->rwin_save);
+ }
+
+ err |= __put_user(oldset->sig[0], &sf->info.si_mask);
+ err |= __copy_to_user(sf->extramask, &oldset->sig[1],
+ (_NSIG_WORDS - 1) * sizeof(unsigned int));
+ if (!wsaved) {
+ err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
+ sizeof(struct reg_window32));
+ } else {
+ struct reg_window32 *rp;
+
+ rp = &current_thread_info()->reg_window[wsaved - 1];
+ err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
+ }
+ if (err)
+ return err;
+
+ /* 3. signal handler back-trampoline and parameters */
+ regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_I0] = ksig->sig;
+ regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+ regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+ /* 4. signal handler */
+ regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ regs->npc = (regs->pc + 4);
+
+ /* 5. return to kernel instructions */
+ if (ksig->ka.ka_restorer)
+ regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
+ else {
+ regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
+
+ /* mov __NR_sigreturn, %g1 */
+ err |= __put_user(0x821020d8, &sf->insns[0]);
+
+ /* t 0x10 */
+ err |= __put_user(0x91d02010, &sf->insns[1]);
+ if (err)
+ return err;
+
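+ /* The two words just stored encode "mov __NR_sigreturn, %g1" and
+ * "t 0x10", i.e. a trap back into the kernel once the handler
+ * returns through the trampoline.
+ */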
+ /* Flush instruction space. */
+ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
+ }
+ return 0;
+}
+
+static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
+ sigset_t *oldset)
+{
+ struct rt_signal_frame __user *sf;
+ int sigframe_size, wsaved;
+ void __user *tail;
+ unsigned int psr;
+ int err;
+
+ synchronize_user_stack();
+ wsaved = current_thread_info()->w_saved;
+ sigframe_size = sizeof(*sf);
+ if (used_math())
+ sigframe_size += sizeof(__siginfo_fpu_t);
+ if (wsaved)
+ sigframe_size += sizeof(__siginfo_rwin_t);
+ sf = (struct rt_signal_frame __user *)
+ get_sigframe(ksig, regs, sigframe_size);
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+ force_exit_sig(SIGILL);
+ return -EINVAL;
+ }
+
+ tail = sf + 1;
+ err = __put_user(regs->pc, &sf->regs.pc);
+ err |= __put_user(regs->npc, &sf->regs.npc);
+ err |= __put_user(regs->y, &sf->regs.y);
+ psr = regs->psr;
+ if (used_math())
+ psr |= PSR_EF;
+ err |= __put_user(psr, &sf->regs.psr);
+ err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
+ err |= __put_user(0, &sf->extra_size);
+
+ if (psr & PSR_EF) {
+ __siginfo_fpu_t __user *fp = tail;
+ tail += sizeof(*fp);
+ err |= save_fpu_state(regs, fp);
+ err |= __put_user(fp, &sf->fpu_save);
+ } else {
+ err |= __put_user(0, &sf->fpu_save);
+ }
+ if (wsaved) {
+ __siginfo_rwin_t __user *rwp = tail;
+ tail += sizeof(*rwp);
+ err |= save_rwin_state(wsaved, rwp);
+ err |= __put_user(rwp, &sf->rwin_save);
+ } else {
+ err |= __put_user(0, &sf->rwin_save);
+ }
+ err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
+
+ /* Setup sigaltstack */
+ err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
+
+ if (!wsaved) {
+ err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
+ sizeof(struct reg_window32));
+ } else {
+ struct reg_window32 *rp;
+
+ rp = &current_thread_info()->reg_window[wsaved - 1];
+ err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
+ }
+
+ err |= copy_siginfo_to_user(&sf->info, &ksig->info);
+
+ if (err)
+ return err;
+
+ regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_I0] = ksig->sig;
+ regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+ regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
+
+ regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ regs->npc = (regs->pc + 4);
+
+ if (ksig->ka.ka_restorer)
+ regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
+ else {
+ regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
+
+ /* mov __NR_rt_sigreturn, %g1 */
+ err |= __put_user(0x82102065, &sf->insns[0]);
+
+ /* t 0x10 */
+ err |= __put_user(0x91d02010, &sf->insns[1]);
+ if (err)
+ return err;
+
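+ /* As in setup_frame(), the freshly written trampoline has to be
+ * made visible to instruction fetch before userspace runs it.
+ */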
+ /* Flush instruction space. */
+ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
+ }
+ return 0;
+}
+
+static inline void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int err;
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err = setup_rt_frame(ksig, regs, oldset);
+ else
+ err = setup_frame(ksig, regs, oldset);
+ signal_setup_done(err, ksig, 0);
+}
+
+static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
+ struct sigaction *sa)
+{
+ switch (regs->u_regs[UREG_I0]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->u_regs[UREG_I0] = EINTR;
+ regs->psr |= PSR_C;
+ break;
+ case ERESTARTSYS:
+ if (!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ fallthrough;
+ case ERESTARTNOINTR:
+ regs->u_regs[UREG_I0] = orig_i0;
+ regs->pc -= 4;
+ regs->npc -= 4;
+ }
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+{
+ struct ksignal ksig;
+ int restart_syscall;
+ bool has_handler;
+
+ /* It's a lot of work and synchronization to add a new ptrace
+ * register for GDB to save and restore in order to get
+ * orig_i0 correct for syscall restarts when debugging.
+ *
+ * Although it should be the case that most of the global
+ * registers are volatile across a system call, glibc already
+ * depends upon the fact that we preserve them. So we can't
+ * just use any global register to save away the orig_i0 value.
+ *
+ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+ * preserved across a system call trap by various pieces of
+ * code in glibc.
+ *
+ * %g7 is used as the "thread register". %g6 is not used in
+ * any fixed manner. %g6 is used as a scratch register and
+ * a compiler temporary, but its value is never used across
+ * a system call. Therefore %g6 is usable for orig_i0 storage.
+ */
+ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
+ regs->u_regs[UREG_G6] = orig_i0;
+
+ has_handler = get_signal(&ksig);
+
+ /* If the debugger messes with the program counter, it clears
+ * the software "in syscall" bit, directing us to not perform
+ * a syscall restart.
+ */
+ restart_syscall = 0;
+ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
+ restart_syscall = 1;
+ orig_i0 = regs->u_regs[UREG_G6];
+ }
+
+ if (has_handler) {
+ if (restart_syscall)
+ syscall_restart(orig_i0, regs, &ksig.ka.sa);
+ handle_signal(&ksig, regs);
+ } else {
+ if (restart_syscall) {
+ switch (regs->u_regs[UREG_I0]) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ /* replay the system call when we are done */
+ regs->u_regs[UREG_I0] = orig_i0;
+ regs->pc -= 4;
+ regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
+ fallthrough;
+ case ERESTART_RESTARTBLOCK:
+ regs->u_regs[UREG_G1] = __NR_restart_syscall;
+ regs->pc -= 4;
+ regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
+ }
+ }
+ restore_saved_sigmask();
+ }
+}
+
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
+ unsigned long thread_info_flags)
+{
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs, orig_i0);
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+}
+
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+ struct sigstack __user *ossptr,
+ unsigned long sp)
+{
+ int ret = -EFAULT;
+
+ /* First see if old state is wanted.
*/ + if (ossptr) { + if (put_user(current->sas_ss_sp + current->sas_ss_size, + &ossptr->the_stack) || + __put_user(on_sig_stack(sp), &ossptr->cur_status)) + goto out; + } + + /* Now see if we want to update the new state. */ + if (ssptr) { + char *ss_sp; + + if (get_user(ss_sp, &ssptr->the_stack)) + goto out; + /* If the current stack was set with sigaltstack, don't + swap stacks while we are on it. */ + ret = -EPERM; + if (current->sas_ss_sp && on_sig_stack(sp)) + goto out; + + /* Since we don't know the extent of the stack, and we don't + track onstack-ness, but rather calculate it, we must + presume a size. Ho hum this interface is lossy. */ + current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; + current->sas_ss_size = SIGSTKSZ; + } + ret = 0; +out: + return ret; +} diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c new file mode 100644 index 000000000..570e43e6f --- /dev/null +++ b/arch/sparc/kernel/signal_64.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sparc64/kernel/signal.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sigutil.h" +#include "systbls.h" +#include "kernel.h" +#include "entry.h" + +/* {set, get}context() needed for 64-bit SparcLinux userland. */ +asmlinkage void sparc64_set_context(struct pt_regs *regs) +{ + struct ucontext __user *ucp = (struct ucontext __user *) + regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); + mc_gregset_t __user *grp; + unsigned long pc, npc, tstate; + unsigned long fp, i7; + unsigned char fenab; + int err; + + synchronize_user_stack(); + if (get_thread_wsaved() || + (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || + (!__access_ok(ucp, sizeof(*ucp)))) + goto do_sigsegv; + grp = &ucp->uc_mcontext.mc_gregs; + err = __get_user(pc, &((*grp)[MC_PC])); + err |= __get_user(npc, &((*grp)[MC_NPC])); + if (err || ((pc | npc) & 3)) + goto do_sigsegv; + if (regs->u_regs[UREG_I1]) { + sigset_t set; + + if (_NSIG_WORDS == 1) { + if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0])) + goto do_sigsegv; + } else { + if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t))) + goto do_sigsegv; + } + set_current_blocked(&set); + } + if (test_thread_flag(TIF_32BIT)) { + pc &= 0xffffffff; + npc &= 0xffffffff; + } + regs->tpc = pc; + regs->tnpc = npc; + err |= __get_user(regs->y, &((*grp)[MC_Y])); + err |= __get_user(tstate, &((*grp)[MC_TSTATE])); + regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC); + regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); + err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1])); + err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2])); + err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3])); + err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4])); + err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5])); + err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6])); + + /* Skip %g7 as that's the thread register in userspace. 
*/ + + err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0])); + err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1])); + err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2])); + err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3])); + err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4])); + err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5])); + err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6])); + err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7])); + + err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp)); + err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7)); + err |= __put_user(fp, + (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6]))); + err |= __put_user(i7, + (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7]))); + + err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab)); + if (fenab) { + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + + fprs_write(0); + err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs)); + if (fprs & FPRS_DL) + err |= copy_from_user(fpregs, + &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs), + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_from_user(fpregs+16, + ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16, + (sizeof(unsigned int) * 32)); + err |= __get_user(current_thread_info()->xfsr[0], + &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr)); + err |= __get_user(current_thread_info()->gsr[0], + &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr)); + regs->tstate &= ~TSTATE_PEF; + } + if (err) + goto do_sigsegv; +out: + exception_exit(prev_state); + return; +do_sigsegv: + force_sig(SIGSEGV); + goto out; +} + +asmlinkage void sparc64_get_context(struct pt_regs *regs) +{ + struct ucontext __user *ucp = (struct ucontext __user *) + regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); + mc_gregset_t __user *grp; + mcontext_t __user *mcp; + unsigned long fp, i7; + unsigned char fenab; + int err; + + synchronize_user_stack(); + if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp))) + goto do_sigsegv; + +#if 1 + fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */ +#else + fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF); +#endif + + mcp = &ucp->uc_mcontext; + grp = &mcp->mc_gregs; + + /* Skip over the trap instruction, first. 
*/
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc = (regs->tnpc & 0xffffffff);
+ regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
+ } else {
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+ }
+ err = 0;
+ if (_NSIG_WORDS == 1)
+ err |= __put_user(current->blocked.sig[0],
+ (unsigned long __user *)&ucp->uc_sigmask);
+ else
+ err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
+ sizeof(sigset_t));
+
+ err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
+ err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
+ err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
+ err |= __put_user(regs->y, &((*grp)[MC_Y]));
+ err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
+ err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
+ err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
+ err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
+ err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
+ err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
+ err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
+ err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
+ err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
+ err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
+ err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
+ err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
+ err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
+ err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
+ err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
+
+ err |= __get_user(fp,
+ (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+ err |= __get_user(i7,
+ (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+ err |= __put_user(fp, &(mcp->mc_fp));
+ err |= __put_user(i7, &(mcp->mc_i7));
+
+ err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
+ if (fenab) {
+ unsigned long *fpregs = current_thread_info()->fpregs;
+ unsigned long fprs;
+
+ fprs = current_thread_info()->fpsaved[0];
+ if (fprs & FPRS_DL)
+ err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
+ (sizeof(unsigned int) * 32));
+ if (fprs & FPRS_DU)
+ err |= copy_to_user(
+ ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
+ (sizeof(unsigned int) * 32));
+ err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
+ err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
+ err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
+ }
+ if (err)
+ goto do_sigsegv;
+out:
+ exception_exit(prev_state);
+ return;
+do_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+/* Checks if the fp is valid. We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
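+ * Note that the sparc64 callers apply STACK_BIAS to the user stack
+ * pointer before the frame pointer reaches this check.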
+ */ +static bool invalid_frame_pointer(void __user *fp) +{ + if (((unsigned long) fp) & 15) + return true; + return false; +} + +struct rt_signal_frame { + struct sparc_stackf ss; + siginfo_t info; + struct pt_regs regs; + __siginfo_fpu_t __user *fpu_save; + stack_t stack; + sigset_t mask; + __siginfo_rwin_t *rwin_save; +}; + +void do_rt_sigreturn(struct pt_regs *regs) +{ + unsigned long tpc, tnpc, tstate, ufp; + struct rt_signal_frame __user *sf; + __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; + sigset_t set; + int err; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + synchronize_user_stack (); + sf = (struct rt_signal_frame __user *) + (regs->u_regs [UREG_FP] + STACK_BIAS); + + /* 1. Make sure we are not getting garbage from the user */ + if (invalid_frame_pointer(sf)) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if ((ufp + STACK_BIAS) & 0x7) + goto segv; + + err = __get_user(tpc, &sf->regs.tpc); + err |= __get_user(tnpc, &sf->regs.tnpc); + if (test_thread_flag(TIF_32BIT)) { + tpc &= 0xffffffff; + tnpc &= 0xffffffff; + } + err |= ((tpc | tnpc) & 3); + + /* 2. Restore the state */ + err |= __get_user(regs->y, &sf->regs.y); + err |= __get_user(tstate, &sf->regs.tstate); + err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs)); + + /* User can only change condition codes and %asi in %tstate. */ + regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC); + regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); + + err |= __get_user(fpu_save, &sf->fpu_save); + if (!err && fpu_save) + err |= restore_fpu_state(regs, fpu_save); + + err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); + err |= restore_altstack(&sf->stack); + if (err) + goto segv; + + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(rwin_save)) + goto segv; + } + + regs->tpc = tpc; + regs->tnpc = tnpc; + + /* Prevent syscall restart. */ + pt_regs_clear_syscall(regs); + + set_current_blocked(&set); + return; +segv: + force_sig(SIGSEGV); +} + +static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) +{ + unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) + return (void __user *) -1L; + + /* This is the X/Open sanctioned signal stack switching. */ + sp = sigsp(sp, ksig) - framesize; + + /* Always align the stack frame. This handles two cases. First, + * sigaltstack need not be mindful of platform specific stack + * alignment. Second, if we took this signal because the stack + * is not aligned properly, we'd like to take the signal cleanly + * and report that. + */ + sp &= ~15UL; + + return (void __user *) sp; +} + +static inline int +setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) +{ + struct rt_signal_frame __user *sf; + int wsaved, err, sf_size; + void __user *tail; + + /* 1. 
Make sure everything is clean */ + synchronize_user_stack(); + save_and_clear_fpu(); + + wsaved = get_thread_wsaved(); + + sf_size = sizeof(struct rt_signal_frame); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sf_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sf_size += sizeof(__siginfo_rwin_t); + sf = (struct rt_signal_frame __user *) + get_sigframe(ksig, regs, sf_size); + + if (invalid_frame_pointer (sf)) { + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig); + return -EINVAL; + } + + tail = (sf + 1); + + /* 2. Save the current process state */ + err = copy_to_user(&sf->regs, regs, sizeof (*regs)); + + if (current_thread_info()->fpsaved[0] & FPRS_FEF) { + __siginfo_fpu_t __user *fpu_save = tail; + tail += sizeof(__siginfo_fpu_t); + err |= save_fpu_state(regs, fpu_save); + err |= __put_user((u64)fpu_save, &sf->fpu_save); + } else { + err |= __put_user(0, &sf->fpu_save); + } + if (wsaved) { + __siginfo_rwin_t __user *rwin_save = tail; + tail += sizeof(__siginfo_rwin_t); + err |= save_rwin_state(wsaved, rwin_save); + err |= __put_user((u64)rwin_save, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } + + /* Setup sigaltstack */ + err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]); + + err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t)); + + if (!wsaved) { + err |= raw_copy_in_user((u64 __user *)sf, + (u64 __user *)(regs->u_regs[UREG_FP] + + STACK_BIAS), + sizeof(struct reg_window)); + } else { + struct reg_window *rp; + + rp = &current_thread_info()->reg_window[wsaved - 1]; + err |= copy_to_user(sf, rp, sizeof(struct reg_window)); + } + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&sf->info, &ksig->info); + else { + err |= __put_user(ksig->sig, &sf->info.si_signo); + err |= __put_user(SI_NOINFO, &sf->info.si_code); + } + if (err) + return err; + + /* 3. signal handler back-trampoline and parameters */ + regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS; + regs->u_regs[UREG_I0] = ksig->sig; + regs->u_regs[UREG_I1] = (unsigned long) &sf->info; + + /* The sigcontext is passed in this way because of how it + * is defined in GLIBC's /usr/include/bits/sigcontext.h + * for sparc64. It includes the 128 bytes of siginfo_t. + */ + regs->u_regs[UREG_I2] = (unsigned long) &sf->info; + + /* 5. signal handler */ + regs->tpc = (unsigned long) ksig->ka.sa.sa_handler; + regs->tnpc = (regs->tpc + 4); + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + /* 4. return to kernel instructions */ + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; + return 0; +} + +static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, + struct sigaction *sa) +{ + switch (regs->u_regs[UREG_I0]) { + case ERESTART_RESTARTBLOCK: + case ERESTARTNOHAND: + no_system_call_restart: + regs->u_regs[UREG_I0] = EINTR; + regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); + break; + case ERESTARTSYS: + if (!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + fallthrough; + case ERESTARTNOINTR: + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + } +} + +/* Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake.
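The restart policy that syscall_restart() encodes can be summarized as a small decision function. The sketch below is illustrative only; the ERESTART* values are the kernel-internal codes from include/linux/errno.h, and sa_restart stands in for the SA_RESTART test:

	/* Kernel-internal restart codes, from include/linux/errno.h. */
	#define ERESTARTSYS		512
	#define ERESTARTNOINTR		513
	#define ERESTARTNOHAND		514
	#define ERESTART_RESTARTBLOCK	516

	enum restart_action { NO_RESTART, FAIL_WITH_EINTR, REPLAY_SYSCALL };

	static enum restart_action restart_action(long err, int sa_restart)
	{
		switch (err) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			return FAIL_WITH_EINTR;		/* a handler runs: no restart */
		case ERESTARTSYS:
			if (!sa_restart)
				return FAIL_WITH_EINTR;	/* SA_RESTART not requested */
			/* fallthrough */
		case ERESTARTNOINTR:
			return REPLAY_SYSCALL;		/* back tpc/tnpc up by 4 */
		}
		return NO_RESTART;	/* not a restart code at all */
	}

Replaying is done by rewinding both tpc and tnpc one instruction, so the trap instruction re-executes with the original %o0 restored.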
+ */ +static void do_signal(struct pt_regs *regs, unsigned long orig_i0) +{ + struct ksignal ksig; + int restart_syscall; + bool has_handler; + + /* It's a lot of work and synchronization to add a new ptrace + * register for GDB to save and restore in order to get + * orig_i0 correct for syscall restarts when debugging. + * + * Although it should be the case that most of the global + * registers are volatile across a system call, glibc already + * depends upon the fact that we preserve them. So we can't + * just use any global register to save away the orig_i0 value. + * + * In particular %g2, %g3, %g4, and %g5 are all assumed to be + * preserved across a system call trap by various pieces of + * code in glibc. + * + * %g7 is used as the "thread register". %g6 is not used in + * any fixed manner. %g6 is used as a scratch register and + * a compiler temporary, but its value is never used across + * a system call. Therefore %g6 is usable for orig_i0 storage. + */ + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) + regs->u_regs[UREG_G6] = orig_i0; + +#ifdef CONFIG_COMPAT + if (test_thread_flag(TIF_32BIT)) { + do_signal32(regs); + return; + } +#endif + + has_handler = get_signal(&ksig); + + restart_syscall = 0; + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } + + if (has_handler) { + if (restart_syscall) + syscall_restart(orig_i0, regs, &ksig.ka.sa); + signal_setup_done(setup_rt_frame(&ksig, regs), &ksig, 0); + } else { + if (restart_syscall) { + switch (regs->u_regs[UREG_I0]) { + case ERESTARTNOHAND: + case ERESTARTSYS: + case ERESTARTNOINTR: + /* replay the system call when we are done */ + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + fallthrough; + case ERESTART_RESTARTBLOCK: + regs->u_regs[UREG_G1] = __NR_restart_syscall; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + } + } + restore_saved_sigmask(); + } +} + +void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) +{ + user_exit(); + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs, orig_i0); + if (thread_info_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + user_enter(); +} + +/* + * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as + * changes likely come with new fields that should be added below.
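The static_assert block that follows pins the user-visible siginfo_t layout at compile time. The same technique works for any ABI-sensitive structure; here is a toy example (hypothetical struct, C11 static_assert) that fails the build the moment a field moves, assuming an LP64 target:

	#include <assert.h>	/* static_assert (C11) */
	#include <stddef.h>	/* offsetof */

	struct toy_info {
		int  signo;
		int  errno_val;
		int  code;
		/* 4 bytes of padding here on LP64 */
		long addr;
	};

	static_assert(offsetof(struct toy_info, signo) == 0x00, "ABI: signo moved");
	static_assert(offsetof(struct toy_info, code)  == 0x08, "ABI: code moved");
	static_assert(offsetof(struct toy_info, addr)  == 0x10, "ABI: addr moved");
	static_assert(sizeof(struct toy_info) == 24, "ABI: size changed");

Note how several union members in the real block (si_pid/si_tid, si_value/si_status) assert the same offset, documenting that they overlay each other.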
+ */ +static_assert(NSIGILL == 11); +static_assert(NSIGFPE == 15); +static_assert(NSIGSEGV == 9); +static_assert(NSIGBUS == 5); +static_assert(NSIGTRAP == 6); +static_assert(NSIGCHLD == 6); +static_assert(NSIGSYS == 2); +static_assert(sizeof(siginfo_t) == 128); +static_assert(__alignof__(siginfo_t) == 8); +static_assert(offsetof(siginfo_t, si_signo) == 0x00); +static_assert(offsetof(siginfo_t, si_errno) == 0x04); +static_assert(offsetof(siginfo_t, si_code) == 0x08); +static_assert(offsetof(siginfo_t, si_pid) == 0x10); +static_assert(offsetof(siginfo_t, si_uid) == 0x14); +static_assert(offsetof(siginfo_t, si_tid) == 0x10); +static_assert(offsetof(siginfo_t, si_overrun) == 0x14); +static_assert(offsetof(siginfo_t, si_status) == 0x18); +static_assert(offsetof(siginfo_t, si_utime) == 0x20); +static_assert(offsetof(siginfo_t, si_stime) == 0x28); +static_assert(offsetof(siginfo_t, si_value) == 0x18); +static_assert(offsetof(siginfo_t, si_int) == 0x18); +static_assert(offsetof(siginfo_t, si_ptr) == 0x18); +static_assert(offsetof(siginfo_t, si_addr) == 0x10); +static_assert(offsetof(siginfo_t, si_trapno) == 0x18); +static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18); +static_assert(offsetof(siginfo_t, si_lower) == 0x20); +static_assert(offsetof(siginfo_t, si_upper) == 0x28); +static_assert(offsetof(siginfo_t, si_pkey) == 0x20); +static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); +static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); +static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); +static_assert(offsetof(siginfo_t, si_band) == 0x10); +static_assert(offsetof(siginfo_t, si_fd) == 0x14); diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h new file mode 100644 index 000000000..21d332d8b --- /dev/null +++ b/arch/sparc/kernel/sigutil.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SIGUTIL_H +#define _SIGUTIL_H + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin); +int restore_rwin_state(__siginfo_rwin_t __user *rp); + +#endif /* _SIGUTIL_H */ diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c new file mode 100644 index 000000000..f25c6daa9 --- /dev/null +++ b/arch/sparc/kernel/sigutil_32.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "sigutil.h" + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err = 0; +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + regs->psr &= ~(PSR_EF); + clear_tsk_thread_flag(current, TIF_USEDFPU); + } +#else + if (current == last_task_used_math) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + last_task_used_math = NULL; + regs->psr &= ~(PSR_EF); + } +#endif + err |= __copy_to_user(&fpu->si_float_regs[0], + &current->thread.float_regs[0], + (sizeof(unsigned long) * 32)); + err |= __put_user(current->thread.fsr, &fpu->si_fsr); + err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_to_user(&fpu->si_fpqueue[0], + &current->thread.fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); +
clear_used_math(); + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err; + + if (((unsigned long) fpu) & 3) + return -EFAULT; + +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) + regs->psr &= ~PSR_EF; +#else + if (current == last_task_used_math) { + last_task_used_math = NULL; + regs->psr &= ~PSR_EF; + } +#endif + set_used_math(); + clear_tsk_thread_flag(current, TIF_USEDFPU); + + if (!access_ok(fpu, sizeof(*fpu))) + return -EFAULT; + + err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], + (sizeof(unsigned long) * 32)); + err |= __get_user(current->thread.fsr, &fpu->si_fsr); + err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_from_user(&current->thread.fpqueue[0], + &fpu->si_fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window32 *rp; + unsigned long fp; + + rp = &current_thread_info()->reg_window[i]; + fp = current_thread_info()->rwbuf_stkptrs[i]; + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window32)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + if (((unsigned long) rp) & 3) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window32)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + t->w_saved = wsaved; + synchronize_user_stack(); + if (t->w_saved) + return -EFAULT; + return 0; + +} diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c new file mode 100644 index 000000000..512e4639e --- /dev/null +++ b/arch/sparc/kernel/sigutil_64.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "sigutil.h" + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err = 0; + + fprs = current_thread_info()->fpsaved[0]; + if (fprs & FPRS_DL) + err |= copy_to_user(&fpu->si_float_regs[0], fpregs, + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, + (sizeof(unsigned int) * 32)); + err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); + err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); + err |= __put_user(fprs, &fpu->si_fprs); + + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err; + + if (((unsigned long) fpu) & 7) + return -EFAULT; + + err = get_user(fprs, &fpu->si_fprs); + fprs_write(0); + regs->tstate &= ~TSTATE_PEF; + if (fprs & FPRS_DL) + err |= copy_from_user(fpregs, &fpu->si_float_regs[0], + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], + (sizeof(unsigned int) * 32)); + err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+ err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); + current_thread_info()->fpsaved[0] |= fprs; + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window *rp = &current_thread_info()->reg_window[i]; + unsigned long fp = current_thread_info()->rwbuf_stkptrs[i]; + + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + if (((unsigned long) rp) & 7) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + set_thread_wsaved(wsaved); + synchronize_user_stack(); + if (get_thread_wsaved()) + return -EFAULT; + return 0; +} diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c new file mode 100644 index 000000000..ad8094d95 --- /dev/null +++ b/arch/sparc/kernel/smp_32.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0 +/* smp.c: Sparc SMP support. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org) + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; + +cpumask_t smp_commenced_mask = CPU_MASK_NONE; + +const struct sparc32_ipi_ops *sparc32_ipi_ops; + +/* The only guaranteed locking primitive available on all Sparc + * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically + * places the current byte at the effective address into dest_reg and + * places 0xff there afterwards. Pretty lame locking primitive + * compared to the Alpha and the Intel no? Most Sparcs have 'swap' + * instruction which is much better...
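The ldstub semantics described in that comment map directly onto a byte-sized test-and-set lock. A sketch using the GCC/Clang __atomic builtins as a portable stand-in for the instruction (illustrative only, not how the kernel's sparc32 spinlocks are actually built):

	typedef struct { volatile unsigned char byte; } ts_lock_t;

	static void ts_lock(ts_lock_t *l)
	{
		/* ldstub: atomically fetch the old byte and store 0xff.
		 * A zero return means we took the lock; 0xff means some
		 * other cpu already holds it. */
		while (__atomic_exchange_n(&l->byte, 0xff, __ATOMIC_ACQUIRE) != 0)
			;	/* spin until the holder stores 0 */
	}

	static void ts_unlock(ts_lock_t *l)
	{
		__atomic_store_n(&l->byte, 0, __ATOMIC_RELEASE);
	}

The 'swap' instruction mentioned above improves on this only in that it can exchange a full word, enabling richer lock encodings than a single 0/0xff byte.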
+ */ + +void smp_store_cpu_info(int id) +{ + int cpu_node; + int mid; + + cpu_data(id).udelay_val = loops_per_jiffy; + + cpu_find_by_mid(id, &cpu_node); + cpu_data(id).clock_tick = prom_getintdefault(cpu_node, + "clock-frequency", 0); + cpu_data(id).prom_node = cpu_node; + mid = cpu_get_hwmid(cpu_node); + + if (mid < 0) { + printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x", id, cpu_node); + mid = 0; + } + cpu_data(id).mid = mid; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + unsigned long bogosum = 0; + int cpu, num = 0; + + for_each_online_cpu(cpu) { + num++; + bogosum += cpu_data(cpu).udelay_val; + } + + printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n", + num, bogosum/(500000/HZ), + (bogosum/(5000/HZ))%100); + + switch(sparc_cpu_model) { + case sun4m: + smp4m_smp_done(); + break; + case sun4d: + smp4d_smp_done(); + break; + case sparc_leon: + leon_smp_done(); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + } +} + +void cpu_panic(void) +{ + printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); + panic("SMP bolixed\n"); +} + +struct linux_prom_registers smp_penguin_ctable = { 0 }; + +void smp_send_reschedule(int cpu) +{ + /* + * CPU model dependent way of implementing IPI generation targeting + * a single CPU. The trap handler needs only to do trap entry/return + * to call schedule. + */ + sparc32_ipi_ops->resched(cpu); +} + +void smp_send_stop(void) +{ +} + +void arch_send_call_function_single_ipi(int cpu) +{ + /* trigger one IPI single call on one CPU */ + sparc32_ipi_ops->single(cpu); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + int cpu; + + /* trigger IPI mask call on each CPU */ + for_each_cpu(cpu, mask) + sparc32_ipi_ops->mask_one(cpu); +} + +void smp_resched_interrupt(void) +{ + irq_enter(); + scheduler_ipi(); + local_cpu_data().irq_resched_count++; + irq_exit(); + /* re-schedule routine called by interrupt return code. */ +} + +void smp_call_function_single_interrupt(void) +{ + irq_enter(); + generic_smp_call_function_single_interrupt(); + local_cpu_data().irq_call_count++; + irq_exit(); +} + +void smp_call_function_interrupt(void) +{ + irq_enter(); + generic_smp_call_function_interrupt(); + local_cpu_data().irq_call_count++; + irq_exit(); +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + int i, cpuid, extra; + + printk("Entering SMP Mode...\n"); + + extra = 0; + for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) { + if (cpuid >= NR_CPUS) + extra++; + } + /* i = number of cpus */ + if (extra && max_cpus > i - extra) + printk("Warning: NR_CPUS is too low to start all cpus\n"); + + smp_store_cpu_info(boot_cpu_id); + + switch(sparc_cpu_model) { + case sun4m: + smp4m_boot_cpus(); + break; + case sun4d: + smp4d_boot_cpus(); + break; + case sparc_leon: + leon_boot_cpus(); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + } +} + +/* Set this up early so that things like the scheduler can init + * properly. We use the same cpu mask for both the present and + * possible cpu map. 
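The BogoMIPS figure printed by smp_cpus_done() is plain integer arithmetic on the summed udelay_val (loops_per_jiffy) values. A standalone sketch with hypothetical numbers, assuming HZ=100:

	#include <stdio.h>

	#define HZ 100

	int main(void)
	{
		unsigned long udelay_val = 2489856;	/* hypothetical per-cpu value */
		unsigned long bogosum = 4 * udelay_val;	/* say, four online cpus */

		/* 500000/HZ delay-loop iterations per jiffy equal one
		 * BogoMIPS; dividing by 5000/HZ instead scales by 100 to
		 * expose two decimal places without floating point. */
		printf("Total of 4 processors activated (%lu.%02lu BogoMIPS).\n",
		       bogosum / (500000 / HZ),
		       (bogosum / (5000 / HZ)) % 100);
		return 0;
	}

With these inputs the line reads "... (1991.88 BogoMIPS)."; the kernel prints the same two-part quotient/remainder pair per cpu in smp_bogo().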
+ */ +void __init smp_setup_cpu_possible_map(void) +{ + int instance, mid; + + instance = 0; + while (!cpu_find_by_instance(instance, NULL, &mid)) { + if (mid < NR_CPUS) { + set_cpu_possible(mid, true); + set_cpu_present(mid, true); + } + instance++; + } +} + +void __init smp_prepare_boot_cpu(void) +{ + int cpuid = hard_smp_processor_id(); + + if (cpuid >= NR_CPUS) { + prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); + prom_halt(); + } + if (cpuid != 0) + printk("boot cpu id != 0, this could work but is untested\n"); + + current_thread_info()->cpu = cpuid; + set_cpu_online(cpuid, true); + set_cpu_possible(cpuid, true); +} + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + int ret=0; + + switch(sparc_cpu_model) { + case sun4m: + ret = smp4m_boot_one_cpu(cpu, tidle); + break; + case sun4d: + ret = smp4d_boot_one_cpu(cpu, tidle); + break; + case sparc_leon: + ret = leon_boot_one_cpu(cpu, tidle); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + } + + if (!ret) { + cpumask_set_cpu(cpu, &smp_commenced_mask); + while (!cpu_online(cpu)) + mb(); + } + return ret; +} + +static void arch_cpu_pre_starting(void *arg) +{ + local_ops->cache_all(); + local_ops->tlb_all(); + + switch(sparc_cpu_model) { + case sun4m: + sun4m_cpu_pre_starting(arg); + break; + case sun4d: + sun4d_cpu_pre_starting(arg); + break; + case sparc_leon: + leon_cpu_pre_starting(arg); + break; + default: + BUG(); + } +} + +static void arch_cpu_pre_online(void *arg) +{ + unsigned int cpuid = hard_smp_processor_id(); + + register_percpu_ce(cpuid); + + calibrate_delay(); + smp_store_cpu_info(cpuid); + + local_ops->cache_all(); + local_ops->tlb_all(); + + switch(sparc_cpu_model) { + case sun4m: + sun4m_cpu_pre_online(arg); + break; + case sun4d: + sun4d_cpu_pre_online(arg); + break; + case sparc_leon: + leon_cpu_pre_online(arg); + break; + default: + BUG(); + } +} + +static void sparc_start_secondary(void *arg) +{ + unsigned int cpu; + + /* + * SMP booting is extremely fragile in some architectures. So run + * the cpu initialization code first before anything else. + */ + arch_cpu_pre_starting(arg); + + cpu = smp_processor_id(); + + notify_cpu_starting(cpu); + arch_cpu_pre_online(arg); + + /* Set the CPU in the cpu_online_mask */ + set_cpu_online(cpu, true); + + /* Enable local interrupts now */ + local_irq_enable(); + + wmb(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + + /* We should never reach here! */ + BUG(); +} + +void smp_callin(void) +{ + sparc_start_secondary(NULL); +} + +void smp_bogo(struct seq_file *m) +{ + int i; + + for_each_online_cpu(i) { + seq_printf(m, + "Cpu%dBogo\t: %lu.%02lu\n", + i, + cpu_data(i).udelay_val/(500000/HZ), + (cpu_data(i).udelay_val/(5000/HZ))%100); + } +} + +void smp_info(struct seq_file *m) +{ + int i; + + seq_printf(m, "State:\n"); + for_each_online_cpu(i) + seq_printf(m, "CPU%d\t\t: online\n", i); +} diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c new file mode 100644 index 000000000..a55295d1b --- /dev/null +++ b/arch/sparc/kernel/smp_64.c @@ -0,0 +1,1568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* smp.c: Sparc64 SMP support. + * + * Copyright (C) 1997, 2007, 2008 David S. 
Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpumap.h" +#include "kernel.h" + +DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; +cpumask_t cpu_core_map[NR_CPUS] __read_mostly = + { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; + +cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS-1] = CPU_MASK_NONE }; + +cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS - 1] = CPU_MASK_NONE }; + +EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); +EXPORT_SYMBOL(cpu_core_map); +EXPORT_SYMBOL(cpu_core_sib_map); +EXPORT_SYMBOL(cpu_core_sib_cache_map); + +static cpumask_t smp_commenced_mask; + +static DEFINE_PER_CPU(bool, poke); +static bool cpu_poke; + +void smp_info(struct seq_file *m) +{ + int i; + + seq_printf(m, "State:\n"); + for_each_online_cpu(i) + seq_printf(m, "CPU%d:\t\tonline\n", i); +} + +void smp_bogo(struct seq_file *m) +{ + int i; + + for_each_online_cpu(i) + seq_printf(m, + "Cpu%dClkTck\t: %016lx\n", + i, cpu_data(i).clock_tick); +} + +extern void setup_sparc64_timer(void); + +static volatile unsigned long callin_flag = 0; + +void smp_callin(void) +{ + int cpuid = hard_smp_processor_id(); + + __local_per_cpu_offset = __per_cpu_offset(cpuid); + + if (tlb_type == hypervisor) + sun4v_ktsb_register(); + + __flush_tlb_all(); + + setup_sparc64_timer(); + + if (cheetah_pcache_forced_on) + cheetah_enable_pcache(); + + callin_flag = 1; + __asm__ __volatile__("membar #Sync\n\t" + "flush %%g6" : : : "memory"); + + /* Clear this or we will die instantly when we + * schedule back to this idler... + */ + current_thread_info()->new_child = 0; + + /* Attach to the address space of init_task. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + + /* inform the notifiers about the new cpu */ + notify_cpu_starting(cpuid); + + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) + rmb(); + + set_cpu_online(cpuid, true); + + local_irq_enable(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void cpu_panic(void) +{ + printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); + panic("SMP bolixed\n"); +} + +/* This tick register synchronization scheme is taken entirely from + * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. + * + * The only change I've made is to rework it so that the master + * initiates the synchronization instead of the slave.
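One detail of get_delta() below worth calling out is how it averages two unsigned tick values without overflowing: halve each operand first, then add the carry back when both were odd. A standalone sketch of the same arithmetic:

	#include <assert.h>

	/* Midpoint of a and b without computing a + b (which could wrap). */
	static unsigned long midpoint(unsigned long a, unsigned long b)
	{
		unsigned long mid = a / 2 + b / 2;

		if (a % 2 + b % 2 == 2)	/* both halves rounded down: add 1 back */
			mid++;
		return mid;
	}

	int main(void)
	{
		assert(midpoint(6, 8) == 7);
		assert(midpoint(7, 9) == 8);
		/* Near the top of the range a + b would wrap; midpoint() won't. */
		assert(midpoint(~0UL, ~0UL - 2) == ~0UL - 1);
		return 0;
	}

The same trick appears verbatim in get_delta() as "tcenter = (best_t0/2 + best_t1/2); if (best_t0 % 2 + best_t1 % 2 == 2) tcenter++;".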
-DaveM + */ + +#define MASTER 0 +#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) + +#define NUM_ROUNDS 64 /* magic value */ +#define NUM_ITERS 5 /* likewise */ + +static DEFINE_RAW_SPINLOCK(itc_sync_lock); +static unsigned long go[SLAVE + 1]; + +#define DEBUG_TICK_SYNC 0 + +static inline long get_delta (long *rt, long *master) +{ + unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; + unsigned long tcenter, t0, t1, tm; + unsigned long i; + + for (i = 0; i < NUM_ITERS; i++) { + t0 = tick_ops->get_tick(); + go[MASTER] = 1; + membar_safe("#StoreLoad"); + while (!(tm = go[SLAVE])) + rmb(); + go[SLAVE] = 0; + wmb(); + t1 = tick_ops->get_tick(); + + if (t1 - t0 < best_t1 - best_t0) + best_t0 = t0, best_t1 = t1, best_tm = tm; + } + + *rt = best_t1 - best_t0; + *master = best_tm - best_t0; + + /* average best_t0 and best_t1 without overflow: */ + tcenter = (best_t0/2 + best_t1/2); + if (best_t0 % 2 + best_t1 % 2 == 2) + tcenter++; + return tcenter - best_tm; +} + +void smp_synchronize_tick_client(void) +{ + long i, delta, adj, adjust_latency = 0, done = 0; + unsigned long flags, rt, master_time_stamp; +#if DEBUG_TICK_SYNC + struct { + long rt; /* roundtrip time */ + long master; /* master's timestamp */ + long diff; /* difference between midpoint and master's timestamp */ + long lat; /* estimate of itc adjustment latency */ + } t[NUM_ROUNDS]; +#endif + + go[MASTER] = 1; + + while (go[MASTER]) + rmb(); + + local_irq_save(flags); + { + for (i = 0; i < NUM_ROUNDS; i++) { + delta = get_delta(&rt, &master_time_stamp); + if (delta == 0) + done = 1; /* let's lock on to this... */ + + if (!done) { + if (i > 0) { + adjust_latency += -delta; + adj = -delta + adjust_latency/4; + } else + adj = -delta; + + tick_ops->add_tick(adj); + } +#if DEBUG_TICK_SYNC + t[i].rt = rt; + t[i].master = master_time_stamp; + t[i].diff = delta; + t[i].lat = adjust_latency/4; +#endif + } + } + local_irq_restore(flags); + +#if DEBUG_TICK_SYNC + for (i = 0; i < NUM_ROUNDS; i++) + printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", + t[i].rt, t[i].master, t[i].diff, t[i].lat); +#endif + + printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " + "(last diff %ld cycles, maxerr %lu cycles)\n", + smp_processor_id(), delta, rt); +} + +static void smp_start_sync_tick_client(int cpu); + +static void smp_synchronize_one_tick(int cpu) +{ + unsigned long flags, i; + + go[MASTER] = 0; + + smp_start_sync_tick_client(cpu); + + /* wait for client to be ready */ + while (!go[MASTER]) + rmb(); + + /* now let the client proceed into his loop */ + go[MASTER] = 0; + membar_safe("#StoreLoad"); + + raw_spin_lock_irqsave(&itc_sync_lock, flags); + { + for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { + while (!go[MASTER]) + rmb(); + go[MASTER] = 0; + wmb(); + go[SLAVE] = tick_ops->get_tick(); + membar_safe("#StoreLoad"); + } + } + raw_spin_unlock_irqrestore(&itc_sync_lock, flags); +} + +#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) +static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, + void **descrp) +{ + extern unsigned long sparc64_ttable_tl0; + extern unsigned long kern_locked_tte_data; + struct hvtramp_descr *hdesc; + unsigned long trampoline_ra; + struct trap_per_cpu *tb; + u64 tte_vaddr, tte_data; + unsigned long hv_err; + int i; + + hdesc = kzalloc(sizeof(*hdesc) + + (sizeof(struct hvtramp_mapping) * + num_kernel_image_mappings - 1), + GFP_KERNEL); + if (!hdesc) { + printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " + "hvtramp_descr.\n"); + return; + } + *descrp = hdesc; + + hdesc->cpu = 
cpu; + hdesc->num_mappings = num_kernel_image_mappings; + + tb = &trap_block[cpu]; + + hdesc->fault_info_va = (unsigned long) &tb->fault_info; + hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); + + hdesc->thread_reg = thread_reg; + + tte_vaddr = (unsigned long) KERNBASE; + tte_data = kern_locked_tte_data; + + for (i = 0; i < hdesc->num_mappings; i++) { + hdesc->maps[i].vaddr = tte_vaddr; + hdesc->maps[i].tte = tte_data; + tte_vaddr += 0x400000; + tte_data += 0x400000; + } + + trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); + + hv_err = sun4v_cpu_start(cpu, trampoline_ra, + kimage_addr_to_ra(&sparc64_ttable_tl0), + __pa(hdesc)); + if (hv_err) + printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " + "gives error %lu\n", hv_err); +} +#endif + +extern unsigned long sparc64_cpu_startup; + +/* The OBP cpu startup callback truncates the 3rd arg cookie to + * 32-bits (I think) so to be safe we have it read the pointer + * contained here so we work on >4GB machines. -DaveM + */ +static struct thread_info *cpu_new_thread = NULL; + +static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) +{ + unsigned long entry = + (unsigned long)(&sparc64_cpu_startup); + unsigned long cookie = + (unsigned long)(&cpu_new_thread); + void *descr = NULL; + int timeout, ret; + + callin_flag = 0; + cpu_new_thread = task_thread_info(idle); + + if (tlb_type == hypervisor) { +#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) + if (ldom_domaining_enabled) + ldom_startcpu_cpuid(cpu, + (unsigned long) cpu_new_thread, + &descr); + else +#endif + prom_startcpu_cpuid(cpu, entry, cookie); + } else { + struct device_node *dp = of_find_node_by_cpuid(cpu); + + prom_startcpu(dp->phandle, entry, cookie); + } + + for (timeout = 0; timeout < 50000; timeout++) { + if (callin_flag) + break; + udelay(100); + } + + if (callin_flag) { + ret = 0; + } else { + printk("Processor %d is stuck.\n", cpu); + ret = -ENODEV; + } + cpu_new_thread = NULL; + + kfree(descr); + + return ret; +} + +static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) +{ + u64 result, target; + int stuck, tmp; + + if (this_is_starfire) { + /* map to real upaid */ + cpu = (((cpu & 0x3c) << 1) | + ((cpu & 0x40) >> 4) | + (cpu & 0x3)); + } + + target = (cpu << 14) | 0x70; +again: + /* Ok, this is the real Spitfire Errata #54. + * One must read back from a UDB internal register + * after writes to the UDB interrupt dispatch, but + * before the membar Sync for that write. + * So we use the high UDB control register (ASI 0x7f, + * ADDR 0x20) for the dummy read. -DaveM + */ + tmp = 0x40; + __asm__ __volatile__( + "wrpr %1, %2, %%pstate\n\t" + "stxa %4, [%0] %3\n\t" + "stxa %5, [%0+%8] %3\n\t" + "add %0, %8, %0\n\t" + "stxa %6, [%0+%8] %3\n\t" + "membar #Sync\n\t" + "stxa %%g0, [%7] %3\n\t" + "membar #Sync\n\t" + "mov 0x20, %%g1\n\t" + "ldxa [%%g1] 0x7f, %%g0\n\t" + "membar #Sync" + : "=r" (tmp) + : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), + "r" (data0), "r" (data1), "r" (data2), "r" (target), + "r" (0x10), "0" (tmp) + : "g1"); + + /* NOTE: PSTATE_IE is still clear. 
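The starfire branch in spitfire_xcall_helper() rearranges the logical cpu number into the physical UPA id before forming the dispatch target. The same bit shuffle as plain C (illustrative; constants copied from the code above):

	/* Logical cpu number -> starfire UPA id, as in the helper above. */
	static unsigned int starfire_upaid(unsigned int cpu)
	{
		return ((cpu & 0x3c) << 1) |	/* bits 2-5 move up to 3-6 */
		       ((cpu & 0x40) >> 4) |	/* bit 6 drops down to bit 2 */
		       (cpu & 0x3);		/* bits 0-1 stay in place */
	}

The three masks are disjoint and the shifted results do not overlap, so the mapping is a pure permutation of the seven id bits.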
*/ + stuck = 100000; + do { + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=r" (result) + : "i" (ASI_INTR_DISPATCH_STAT)); + if (result == 0) { + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + return; + } + stuck -= 1; + if (stuck == 0) + break; + } while (result & 0x1); + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + if (stuck == 0) { + printk("CPU[%d]: mondo stuckage result[%016llx]\n", + smp_processor_id(), result); + } else { + udelay(2); + goto again; + } +} + +static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) +{ + u64 *mondo, data0, data1, data2; + u16 *cpu_list; + u64 pstate; + int i; + + __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); + cpu_list = __va(tb->cpu_list_pa); + mondo = __va(tb->cpu_mondo_block_pa); + data0 = mondo[0]; + data1 = mondo[1]; + data2 = mondo[2]; + for (i = 0; i < cnt; i++) + spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]); +} + +/* Cheetah now allows to send the whole 64-bytes of data in the interrupt + * packet, but we have no use for that. However we do take advantage of + * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). + */ +static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) +{ + int nack_busy_id, is_jbus, need_more; + u64 *mondo, pstate, ver, busy_mask; + u16 *cpu_list; + + cpu_list = __va(tb->cpu_list_pa); + mondo = __va(tb->cpu_mondo_block_pa); + + /* Unfortunately, someone at Sun had the brilliant idea to make the + * busy/nack fields hard-coded by ITID number for this Ultra-III + * derivative processor. + */ + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + is_jbus = ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID); + + __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); + +retry: + need_more = 0; + __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" + : : "r" (pstate), "i" (PSTATE_IE)); + + /* Setup the dispatch data registers. */ + __asm__ __volatile__("stxa %0, [%3] %6\n\t" + "stxa %1, [%4] %6\n\t" + "stxa %2, [%5] %6\n\t" + "membar #Sync\n\t" + : /* no outputs */ + : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]), + "r" (0x40), "r" (0x50), "r" (0x60), + "i" (ASI_INTR_W)); + + nack_busy_id = 0; + busy_mask = 0; + { + int i; + + for (i = 0; i < cnt; i++) { + u64 target, nr; + + nr = cpu_list[i]; + if (nr == 0xffff) + continue; + + target = (nr << 14) | 0x70; + if (is_jbus) { + busy_mask |= (0x1UL << (nr * 2)); + } else { + target |= (nack_busy_id << 24); + busy_mask |= (0x1UL << + (nack_busy_id * 2)); + } + __asm__ __volatile__( + "stxa %%g0, [%0] %1\n\t" + "membar #Sync\n\t" + : /* no outputs */ + : "r" (target), "i" (ASI_INTR_W)); + nack_busy_id++; + if (nack_busy_id == 32) { + need_more = 1; + break; + } + } + } + + /* Now, poll for completion. 
+ */ + { + u64 dispatch_stat, nack_mask; + long stuck; + + stuck = 100000 * nack_busy_id; + nack_mask = busy_mask << 1; + do { + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=r" (dispatch_stat) + : "i" (ASI_INTR_DISPATCH_STAT)); + if (!(dispatch_stat & (busy_mask | nack_mask))) { + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + if (unlikely(need_more)) { + int i, this_cnt = 0; + for (i = 0; i < cnt; i++) { + if (cpu_list[i] == 0xffff) + continue; + cpu_list[i] = 0xffff; + this_cnt++; + if (this_cnt == 32) + break; + } + goto retry; + } + return; + } + if (!--stuck) + break; + } while (dispatch_stat & busy_mask); + + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + + if (dispatch_stat & busy_mask) { + /* Busy bits will not clear, continue instead + * of freezing up on this cpu. + */ + printk("CPU[%d]: mondo stuckage result[%016llx]\n", + smp_processor_id(), dispatch_stat); + } else { + int i, this_busy_nack = 0; + + /* Delay some random time with interrupts enabled + * to prevent deadlock. + */ + udelay(2 * nack_busy_id); + + /* Clear out the mask bits for cpus which did not + * NACK us. + */ + for (i = 0; i < cnt; i++) { + u64 check_mask, nr; + + nr = cpu_list[i]; + if (nr == 0xffff) + continue; + + if (is_jbus) + check_mask = (0x2UL << (2*nr)); + else + check_mask = (0x2UL << + this_busy_nack); + if ((dispatch_stat & check_mask) == 0) + cpu_list[i] = 0xffff; + this_busy_nack += 2; + if (this_busy_nack == 64) + break; + } + + goto retry; + } + } +} + +#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) +#define MONDO_USEC_WAIT_MIN 2 +#define MONDO_USEC_WAIT_MAX 100 +#define MONDO_RETRY_LIMIT 500000 + +/* Multi-cpu list version. + * + * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'. + * Sometimes not all cpus receive the mondo, requiring us to re-send + * it until every cpu has received it, or until the remaining cpus are + * truly stuck and we time out. + * Occasionally a target cpu strand is borrowed briefly by the + * hypervisor to perform guest service, such as PCIe error handling. + * Considering that service time, an overall wait of 1 second is + * reasonable for 1 cpu. + * Two in-between mondo check wait times are therefore defined: 2 usec + * for a single-cpu quick turnaround and up to 100 usec for a large + * cpu count. + * Delivering the mondo to a large number of cpus can take longer, so + * we adjust the retry count as long as the target cpus are making + * forward progress. + */ +static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) +{ + int this_cpu, tot_cpus, prev_sent, i, rem; + int usec_wait, retries, tot_retries; + u16 first_cpu = 0xffff; + unsigned long xc_rcvd = 0; + unsigned long status; + int ecpuerror_id = 0; + int enocpu_id = 0; + u16 *cpu_list; + u16 cpu; + + this_cpu = smp_processor_id(); + cpu_list = __va(tb->cpu_list_pa); + usec_wait = cnt * MONDO_USEC_WAIT_MIN; + if (usec_wait > MONDO_USEC_WAIT_MAX) + usec_wait = MONDO_USEC_WAIT_MAX; + retries = tot_retries = 0; + tot_cpus = cnt; + prev_sent = 0; + + do { + int n_sent, mondo_delivered, target_cpu_busy; + + status = sun4v_cpu_mondo_send(cnt, + tb->cpu_list_pa, + tb->cpu_mondo_block_pa); + + /* HV_EOK means all cpus received the xcall, we're done. */ + if (likely(status == HV_EOK)) + goto xcall_done; + + /* If it's not one of these non-fatal errors, panic */ + if (unlikely((status != HV_EWOULDBLOCK) && + (status != HV_ECPUERROR) && + (status != HV_ENOCPU))) + goto fatal_errors; + + /* First, see if we made any forward progress.
+ * + * Go through the cpu_list, count the target cpus that have + * received our mondo (n_sent), and those that did not (rem). + * Re-pack cpu_list with the cpus that remain to be retried at + * the front - this simplifies tracking the truly stalled cpus. + * + * The hypervisor indicates successful sends by setting + * cpu list entries to the value 0xffff. + * + * EWOULDBLOCK means some target cpus did not receive the + * mondo and a retry usually helps. + * + * ECPUERROR means at least one target cpu is in an error state; + * it's usually safe to skip the faulty cpu and retry. + * + * ENOCPU means one of the target cpus doesn't belong to the + * domain, perhaps it was offlined, which is unexpected but not + * fatal, and it's okay to skip the offlined cpu. + */ + rem = 0; + n_sent = 0; + for (i = 0; i < cnt; i++) { + cpu = cpu_list[i]; + if (likely(cpu == 0xffff)) { + n_sent++; + } else if ((status == HV_ECPUERROR) && + (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { + ecpuerror_id = cpu + 1; + } else if (status == HV_ENOCPU && !cpu_online(cpu)) { + enocpu_id = cpu + 1; + } else { + cpu_list[rem++] = cpu; + } + } + + /* No cpu remained, we're done. */ + if (rem == 0) + break; + + /* Otherwise, update the cpu count for retry. */ + cnt = rem; + + /* Record the overall number of mondos received by the + * first of the remaining cpus. + */ + if (first_cpu != cpu_list[0]) { + first_cpu = cpu_list[0]; + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); + } + + /* Was any mondo delivered successfully? */ + mondo_delivered = (n_sent > prev_sent); + prev_sent = n_sent; + + /* or, was any target cpu busy processing other mondos? */ + target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); + + /* Retry count is for no progress. If we're making progress, + * reset the retry count. + */ + if (likely(mondo_delivered || target_cpu_busy)) { + tot_retries += retries; + retries = 0; + } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { + goto fatal_mondo_timeout; + } + + /* Delay a little bit to let other cpus catch up on + * their cpu mondo queue work. + */ + if (!mondo_delivered) + udelay(usec_wait); + + retries++; + } while (1); + +xcall_done: + if (unlikely(ecpuerror_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", + this_cpu, ecpuerror_id - 1); + } else if (unlikely(enocpu_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", + this_cpu, enocpu_id - 1); + } + return; + +fatal_errors: + /* fatal errors include bad alignment, etc */ + pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", + this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); + panic("Unexpected SUN4V mondo error %lu\n", status); + +fatal_mondo_timeout: + /* some cpus being non-responsive to the cpu mondo */ + pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", + this_cpu, first_cpu, (tot_retries + retries), tot_cpus); + panic("SUN4V mondo timeout panic\n"); +} + +static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); + +static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask) +{ + struct trap_per_cpu *tb; + int this_cpu, i, cnt; + unsigned long flags; + u16 *cpu_list; + u64 *mondo; + + /* We have to do this whole thing with interrupts fully disabled. + * Otherwise if we send an xcall from interrupt context it will + * corrupt both our mondo block and cpu list state.
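The retry accounting above follows a general pattern: an attempt counter that is reset whenever any forward progress is observed, tripping the limit only when the targets are genuinely stuck. A stripped-down sketch of that shape, where try_send() is a toy stand-in for sun4v_cpu_mondo_send() plus the list re-pack:

	#define RETRY_LIMIT 500000

	/* Toy stand-in: delivers to at most one target per call and
	 * returns how many targets remain. */
	static int try_send(int remaining)
	{
		return remaining > 0 ? remaining - 1 : 0;
	}

	static int deliver_all(int cnt)
	{
		int retries = 0;

		while (cnt) {
			int rem = try_send(cnt);

			if (rem < cnt)
				retries = 0;	/* progress: reset the counter */
			else if (++retries > RETRY_LIMIT)
				return -1;	/* no progress for too long */
			cnt = rem;
		}
		return 0;	/* everything delivered */
	}

The real function additionally counts progress when the first stalled cpu's mondo counter advances, so a target that is merely busy with other mondos is not mistaken for a stuck one.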
+ * + * One consequence of this is that we cannot use timeout mechanisms + * that depend upon interrupts being delivered locally. So, for + * example, we cannot sample jiffies and expect it to advance. + * + * Fortunately, udelay() uses %stick/%tick so we can use that. + */ + local_irq_save(flags); + + this_cpu = smp_processor_id(); + tb = &trap_block[this_cpu]; + + mondo = __va(tb->cpu_mondo_block_pa); + mondo[0] = data0; + mondo[1] = data1; + mondo[2] = data2; + wmb(); + + cpu_list = __va(tb->cpu_list_pa); + + /* Setup the initial cpu list. */ + cnt = 0; + for_each_cpu(i, mask) { + if (i == this_cpu || !cpu_online(i)) + continue; + cpu_list[cnt++] = i; + } + + if (cnt) + xcall_deliver_impl(tb, cnt); + + local_irq_restore(flags); +} + +/* Send cross call to all processors mentioned in MASK_P + * except self. Really, there are only two cases currently, + * "cpu_online_mask" and "mm_cpumask(mm)". + */ +static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) +{ + u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); + + xcall_deliver(data0, data1, data2, mask); +} + +/* Send cross call to all processors except self. */ +static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) +{ + smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); +} + +extern unsigned long xcall_sync_tick; + +static void smp_start_sync_tick_client(int cpu) +{ + xcall_deliver((u64) &xcall_sync_tick, 0, 0, + cpumask_of(cpu)); +} + +extern unsigned long xcall_call_function; + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + xcall_deliver((u64) &xcall_call_function, 0, 0, mask); +} + +extern unsigned long xcall_call_function_single; + +void arch_send_call_function_single_ipi(int cpu) +{ + xcall_deliver((u64) &xcall_call_function_single, 0, 0, + cpumask_of(cpu)); +} + +void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) +{ + clear_softint(1 << irq); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); +} + +void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) +{ + clear_softint(1 << irq); + irq_enter(); + generic_smp_call_function_single_interrupt(); + irq_exit(); +} + +static void tsb_sync(void *info) +{ + struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; + struct mm_struct *mm = info; + + /* It is not valid to test "current->active_mm == mm" here. + * + * The value of "current" is not changed atomically with + * switch_mm(). But that's OK, we just need to check the + * current cpu's trap block PGD physical address. 
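xcall_deliver() packs its target list from a cpumask, skipping the sender itself and any offline cpu. The same packing, sketched over a plain bitmask (hypothetical simplification; the kernel iterates a cpumask_t and checks cpu_online()):

	/* Pack the set bits of 'mask', except 'self', into 'list';
	 * return the number of targets queued. */
	static int build_target_list(unsigned long mask, int self,
				     unsigned short *list)
	{
		int cpu, cnt = 0;

		for (cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++) {
			if (cpu == self || !(mask & (1UL << cpu)))
				continue;
			list[cnt++] = (unsigned short)cpu;
		}
		return cnt;	/* caller skips delivery entirely when 0 */
	}

Keeping the sender out of its own list is what lets the dispatch primitives assume every entry needs a cross-cpu interrupt.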
+ */ + if (tp->pgd_paddr == __pa(mm->pgd)) + tsb_context_switch(mm); +} + +void smp_tsb_sync(struct mm_struct *mm) +{ + smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); +} + +extern unsigned long xcall_flush_tlb_mm; +extern unsigned long xcall_flush_tlb_page; +extern unsigned long xcall_flush_tlb_kernel_range; +extern unsigned long xcall_fetch_glob_regs; +extern unsigned long xcall_fetch_glob_pmu; +extern unsigned long xcall_fetch_glob_pmu_n4; +extern unsigned long xcall_receive_signal; +extern unsigned long xcall_new_mmu_context_version; +#ifdef CONFIG_KGDB +extern unsigned long xcall_kgdb_capture; +#endif + +#ifdef DCACHE_ALIASING_POSSIBLE +extern unsigned long xcall_flush_dcache_page_cheetah; +#endif +extern unsigned long xcall_flush_dcache_page_spitfire; + +static inline void __local_flush_dcache_page(struct page *page) +{ +#ifdef DCACHE_ALIASING_POSSIBLE + __flush_dcache_page(page_address(page), + ((tlb_type == spitfire) && + page_mapping_file(page) != NULL)); +#else + if (page_mapping_file(page) != NULL && + tlb_type == spitfire) + __flush_icache_page(__pa(page_address(page))); +#endif +} + +void smp_flush_dcache_page_impl(struct page *page, int cpu) +{ + int this_cpu; + + if (tlb_type == hypervisor) + return; + +#ifdef CONFIG_DEBUG_DCFLUSH + atomic_inc(&dcpage_flushes); +#endif + + this_cpu = get_cpu(); + + if (cpu == this_cpu) { + __local_flush_dcache_page(page); + } else if (cpu_online(cpu)) { + void *pg_addr = page_address(page); + u64 data0 = 0; + + if (tlb_type == spitfire) { + data0 = ((u64)&xcall_flush_dcache_page_spitfire); + if (page_mapping_file(page) != NULL) + data0 |= ((u64)1 << 32); + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { +#ifdef DCACHE_ALIASING_POSSIBLE + data0 = ((u64)&xcall_flush_dcache_page_cheetah); +#endif + } + if (data0) { + xcall_deliver(data0, __pa(pg_addr), + (u64) pg_addr, cpumask_of(cpu)); +#ifdef CONFIG_DEBUG_DCFLUSH + atomic_inc(&dcpage_flushes_xcall); +#endif + } + } + + put_cpu(); +} + +void flush_dcache_page_all(struct mm_struct *mm, struct page *page) +{ + void *pg_addr; + u64 data0; + + if (tlb_type == hypervisor) + return; + + preempt_disable(); + +#ifdef CONFIG_DEBUG_DCFLUSH + atomic_inc(&dcpage_flushes); +#endif + data0 = 0; + pg_addr = page_address(page); + if (tlb_type == spitfire) { + data0 = ((u64)&xcall_flush_dcache_page_spitfire); + if (page_mapping_file(page) != NULL) + data0 |= ((u64)1 << 32); + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { +#ifdef DCACHE_ALIASING_POSSIBLE + data0 = ((u64)&xcall_flush_dcache_page_cheetah); +#endif + } + if (data0) { + xcall_deliver(data0, __pa(pg_addr), + (u64) pg_addr, cpu_online_mask); +#ifdef CONFIG_DEBUG_DCFLUSH + atomic_inc(&dcpage_flushes_xcall); +#endif + } + __local_flush_dcache_page(page); + + preempt_enable(); +} + +#ifdef CONFIG_KGDB +void kgdb_roundup_cpus(void) +{ + smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); +} +#endif + +void smp_fetch_global_regs(void) +{ + smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); +} + +void smp_fetch_global_pmu(void) +{ + if (tlb_type == hypervisor && + sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) + smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); + else + smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); +} + +/* We know that the window frames of the user have been flushed + * to the stack before we get here because all callers of us + * are flush_tlb_*() routines, and these run after flush_cache_*() + * which performs the flushw. 
+ * + * mm->cpu_vm_mask is a bit mask of which cpus an address + * space has (potentially) executed on, this is the heuristic + * we use to limit cross calls. + */ + +/* This currently is only used by the hugetlb arch pre-fault + * hook on UltraSPARC-III+ and later when changing the pagesize + * bits of the context register for an address space. + */ +void smp_flush_tlb_mm(struct mm_struct *mm) +{ + u32 ctx = CTX_HWBITS(mm->context); + + get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_mm, + ctx, 0, 0, + mm_cpumask(mm)); + + __flush_tlb_mm(ctx, SECONDARY_CONTEXT); + + put_cpu(); +} + +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + +void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) +{ + u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; + + get_cpu(); + + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); + + __flush_tlb_pending(ctx, nr, vaddrs); + + put_cpu(); +} + +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + + get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + +void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + start &= PAGE_MASK; + end = PAGE_ALIGN(end); + if (start != end) { + smp_cross_call(&xcall_flush_tlb_kernel_range, + 0, start, end); + + __flush_tlb_kernel_range(start, end); + } +} + +/* CPU capture. */ +/* #define CAPTURE_DEBUG */ +extern unsigned long xcall_capture; + +static atomic_t smp_capture_depth = ATOMIC_INIT(0); +static atomic_t smp_capture_registry = ATOMIC_INIT(0); +static unsigned long penguins_are_doing_time; + +void smp_capture(void) +{ + int result = atomic_add_return(1, &smp_capture_depth); + + if (result == 1) { + int ncpus = num_online_cpus(); + +#ifdef CAPTURE_DEBUG + printk("CPU[%d]: Sending penguins to jail...", + smp_processor_id()); +#endif + penguins_are_doing_time = 1; + atomic_inc(&smp_capture_registry); + smp_cross_call(&xcall_capture, 0, 0, 0); + while (atomic_read(&smp_capture_registry) != ncpus) + rmb(); +#ifdef CAPTURE_DEBUG + printk("done\n"); +#endif + } +} + +void smp_release(void) +{ + if (atomic_dec_and_test(&smp_capture_depth)) { +#ifdef CAPTURE_DEBUG + printk("CPU[%d]: Giving pardon to " + "imprisoned penguins\n", + smp_processor_id()); +#endif + penguins_are_doing_time = 0; + membar_safe("#StoreLoad"); + atomic_dec(&smp_capture_registry); + } +} + +/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE + * set, so they can service tlb flush xcalls... 
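smp_capture() and smp_release() above nest by counting captors: only the 0-to-1 transition broadcasts the capture xcall, and only the transition back to 0 releases everyone. The shape in portable C11 atomics (illustrative only; the kernel uses its own atomic_t helpers):

	#include <stdatomic.h>

	static atomic_int capture_depth;

	static void capture(void)
	{
		/* Only the first captor (depth 0 -> 1) sends the cross call. */
		if (atomic_fetch_add(&capture_depth, 1) == 0)
			;	/* broadcast "go to jail" here */
	}

	static void release(void)
	{
		/* Only the last releaser (depth 1 -> 0) frees the other cpus. */
		if (atomic_fetch_sub(&capture_depth, 1) == 1)
			;	/* clear the flag the captives spin on */
	}

Because the fetch-and-add returns the pre-increment value atomically, two cpus capturing at once cannot both believe they were first.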
+ */ +extern void prom_world(int); + +void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) +{ + clear_softint(1 << irq); + + preempt_disable(); + + __asm__ __volatile__("flushw"); + prom_world(1); + atomic_inc(&smp_capture_registry); + membar_safe("#StoreLoad"); + while (penguins_are_doing_time) + rmb(); + atomic_dec(&smp_capture_registry); + prom_world(0); + + preempt_enable(); +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ +} + +void smp_prepare_boot_cpu(void) +{ +} + +void __init smp_setup_processor_id(void) +{ + if (tlb_type == spitfire) + xcall_deliver_impl = spitfire_xcall_deliver; + else if (tlb_type == cheetah || tlb_type == cheetah_plus) + xcall_deliver_impl = cheetah_xcall_deliver; + else + xcall_deliver_impl = hypervisor_xcall_deliver; +} + +void __init smp_fill_in_cpu_possible_map(void) +{ + int possible_cpus = num_possible_cpus(); + int i; + + if (possible_cpus > nr_cpu_ids) + possible_cpus = nr_cpu_ids; + + for (i = 0; i < possible_cpus; i++) + set_cpu_possible(i, true); + for (; i < NR_CPUS; i++) + set_cpu_possible(i, false); +} + +void smp_fill_in_sib_core_maps(void) +{ + unsigned int i; + + for_each_present_cpu(i) { + unsigned int j; + + cpumask_clear(&cpu_core_map[i]); + if (cpu_data(i).core_id == 0) { + cpumask_set_cpu(i, &cpu_core_map[i]); + continue; + } + + for_each_present_cpu(j) { + if (cpu_data(i).core_id == + cpu_data(j).core_id) + cpumask_set_cpu(j, &cpu_core_map[i]); + } + } + + for_each_present_cpu(i) { + unsigned int j; + + for_each_present_cpu(j) { + if (cpu_data(i).max_cache_id == + cpu_data(j).max_cache_id) + cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]); + + if (cpu_data(i).sock_id == cpu_data(j).sock_id) + cpumask_set_cpu(j, &cpu_core_sib_map[i]); + } + } + + for_each_present_cpu(i) { + unsigned int j; + + cpumask_clear(&per_cpu(cpu_sibling_map, i)); + if (cpu_data(i).proc_id == -1) { + cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); + continue; + } + + for_each_present_cpu(j) { + if (cpu_data(i).proc_id == + cpu_data(j).proc_id) + cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); + } + } +} + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + int ret = smp_boot_one_cpu(cpu, tidle); + + if (!ret) { + cpumask_set_cpu(cpu, &smp_commenced_mask); + while (!cpu_online(cpu)) + mb(); + if (!cpu_online(cpu)) { + ret = -ENODEV; + } else { + /* On SUN4V, writes to %tick and %stick are + * not allowed. 
+ */ + if (tlb_type != hypervisor) + smp_synchronize_one_tick(cpu); + } + } + return ret; +} + +#ifdef CONFIG_HOTPLUG_CPU +void cpu_play_dead(void) +{ + int cpu = smp_processor_id(); + unsigned long pstate; + + idle_task_exit(); + + if (tlb_type == hypervisor) { + struct trap_per_cpu *tb = &trap_block[cpu]; + + sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, + tb->cpu_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, + tb->dev_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, + tb->resum_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, + tb->nonresum_mondo_pa, 0); + } + + cpumask_clear_cpu(cpu, &smp_commenced_mask); + membar_safe("#Sync"); + + local_irq_disable(); + + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + + while (1) + barrier(); +} + +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + cpuinfo_sparc *c; + int i; + + for_each_cpu(i, &cpu_core_map[cpu]) + cpumask_clear_cpu(cpu, &cpu_core_map[i]); + cpumask_clear(&cpu_core_map[cpu]); + + for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); + cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); + + c = &cpu_data(cpu); + + c->core_id = 0; + c->proc_id = -1; + + smp_wmb(); + + /* Make sure no interrupts point to this cpu. */ + fixup_irqs(); + + local_irq_enable(); + mdelay(1); + local_irq_disable(); + + set_cpu_online(cpu, false); + + cpu_map_rebuild(); + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + int i; + + for (i = 0; i < 100; i++) { + smp_rmb(); + if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) + break; + msleep(100); + } + if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { + printk(KERN_ERR "CPU %u didn't die...\n", cpu); + } else { +#if defined(CONFIG_SUN_LDOMS) + unsigned long hv_err; + int limit = 100; + + do { + hv_err = sun4v_cpu_stop(cpu); + if (hv_err == HV_EOK) { + set_cpu_present(cpu, false); + break; + } + } while (--limit > 0); + if (limit <= 0) { + printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", + hv_err); + } +#endif + } +} +#endif + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +static void send_cpu_ipi(int cpu) +{ + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); +} + +void scheduler_poke(void) +{ + if (!cpu_poke) + return; + + if (!__this_cpu_read(poke)) + return; + + __this_cpu_write(poke, false); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); +} + +static unsigned long send_cpu_poke(int cpu) +{ + unsigned long hv_err; + + per_cpu(poke, cpu) = true; + hv_err = sun4v_cpu_poke(cpu); + if (hv_err != HV_EOK) { + per_cpu(poke, cpu) = false; + pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n", + __func__, hv_err); + } + + return hv_err; +} + +void smp_send_reschedule(int cpu) +{ + if (cpu == smp_processor_id()) { + WARN_ON_ONCE(preemptible()); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); + return; + } + + /* Use cpu poke to resume idle cpu if supported. 
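__cpu_die() above waits for the dying cpu with a classic bounded poll: up to 100 checks, 100 ms apart, then give up and report. As a standalone sketch, where check_dead() is a hypothetical stand-in for the smp_commenced_mask test:

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static bool check_dead(unsigned int cpu)
	{
		(void)cpu;
		return true;	/* toy stand-in for the cpumask test */
	}

	static void wait_for_cpu_death(unsigned int cpu)
	{
		int i;

		for (i = 0; i < 100; i++) {	/* bounded: ~10 seconds total */
			if (check_dead(cpu))
				return;
			usleep(100 * 1000);	/* the msleep(100) in __cpu_die() */
		}
		fprintf(stderr, "CPU %u didn't die...\n", cpu);
	}

The bound matters because the waiter runs with scheduling enabled: a cpu that never clears itself from the mask must not wedge the hot-unplug path forever.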
*/ + if (cpu_poke && idle_cpu(cpu)) { + unsigned long ret; + + ret = send_cpu_poke(cpu); + if (ret == HV_EOK) + return; + } + + /* Use IPI in following cases: + * - cpu poke not supported + * - cpu not idle + * - send_cpu_poke() returns with error + */ + send_cpu_ipi(cpu); +} + +void smp_init_cpu_poke(void) +{ + unsigned long major; + unsigned long minor; + int ret; + + if (tlb_type != hypervisor) + return; + + ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); + if (ret) { + pr_debug("HV_GRP_CORE is not registered\n"); + return; + } + + if (major == 1 && minor >= 6) { + /* CPU POKE is registered. */ + cpu_poke = true; + return; + } + + pr_debug("CPU_POKE not supported\n"); +} + +void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) +{ + clear_softint(1 << irq); + scheduler_ipi(); +} + +static void stop_this_cpu(void *dummy) +{ + set_cpu_online(smp_processor_id(), false); + prom_stopself(); +} + +void smp_send_stop(void) +{ + int cpu; + + if (tlb_type == hypervisor) { + int this_cpu = smp_processor_id(); +#ifdef CONFIG_SERIAL_SUNHV + sunhv_migrate_hvcons_irq(this_cpu); +#endif + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + + set_cpu_online(cpu, false); +#ifdef CONFIG_SUN_LDOMS + if (ldom_domaining_enabled) { + unsigned long hv_err; + hv_err = sun4v_cpu_stop(cpu); + if (hv_err) + printk(KERN_ERR "sun4v_cpu_stop() " + "failed err=%lu\n", hv_err); + } else +#endif + prom_stopcpu_cpuid(cpu); + } + } else + smp_call_function(stop_this_cpu, NULL, 0); +} + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + if (cpu_to_node(from) == cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +} + +static int __init pcpu_cpu_to_node(int cpu) +{ + return cpu_to_node(cpu); +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int cpu; + int rc = -EINVAL; + + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, 4 << 20, + pcpu_cpu_distance, + pcpu_cpu_to_node); + if (rc) + pr_warn("PERCPU: %s allocator failed (%d), " + "falling back to page size\n", + pcpu_fc_names[pcpu_chosen_fc], rc); + } + if (rc < 0) + rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, + pcpu_cpu_to_node); + if (rc < 0) + panic("cannot initialize percpu area (err=%d)", rc); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; + + /* Setup %g5 for the boot cpu. */ + __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); + + of_fill_in_cpu_data(); + if (tlb_type == hypervisor) + mdesc_fill_in_cpu_data(cpu_all_mask); +} diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c new file mode 100644 index 000000000..09aa69e42 --- /dev/null +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -0,0 +1,12 @@ +/* + * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) + */ + +#include +#include + +/* This is needed only for drivers/sbus/char/openprom.c */ +EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S new file mode 100644 index 000000000..5427af440 --- /dev/null +++ b/arch/sparc/kernel/spiterrs.S @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* We need to carefully read the error status, ACK the errors, + * prevent recursive traps, and pass the information on to C + * code for logging. + * + * We pass the AFAR in as-is, and we encode the status + * information as described in asm-sparc64/sfafsr.h + */ + .type __spitfire_access_error,#function +__spitfire_access_error: + /* Disable ESTATE error reporting so that we do not take + * recursive traps and RED state the processor. + */ + stxa %g0, [%g0] ASI_ESTATE_ERROR_EN + membar #Sync + + mov UDBE_UE, %g1 + ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR + + /* __spitfire_cee_trap branches here with AFSR in %g4 and + * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the ESTATE + * Error Enable register. + */ +__spitfire_cee_trap_continue: + ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR + + rdpr %tt, %g3 + and %g3, 0x1ff, %g3 ! Paranoia + sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3 + or %g4, %g3, %g4 + rdpr %tl, %g3 + cmp %g3, 1 + mov 1, %g3 + bleu %xcc, 1f + sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3 + + or %g4, %g3, %g4 + + /* Read in the UDB error register state, clearing the sticky + * error bits as-needed. We only clear them if the UE bit is + * set. Likewise, __spitfire_cee_trap below will only do so + * if the CE bit is set. + * + * NOTE: UltraSparc-I/II have high and low UDB error + * registers, corresponding to the two UDB units + * present on those chips. UltraSparc-IIi only + * has a single UDB, called "SDB" in the manual. + * For IIi the upper UDB register always reads + * as zero so for our purposes things will just + * work with the checks below. + */ +1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3 + and %g3, 0x3ff, %g7 ! Paranoia + sllx %g7, SFSTAT_UDBH_SHIFT, %g7 + or %g4, %g7, %g4 + andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE + be,pn %xcc, 1f + nop + stxa %g3, [%g0] ASI_UDB_ERROR_W + membar #Sync + +1: mov 0x18, %g3 + ldxa [%g3] ASI_UDBL_ERROR_R, %g3 + and %g3, 0x3ff, %g7 ! Paranoia + sllx %g7, SFSTAT_UDBL_SHIFT, %g7 + or %g4, %g7, %g4 + andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE + be,pn %xcc, 1f + nop + mov 0x18, %g7 + stxa %g3, [%g7] ASI_UDB_ERROR_W + membar #Sync + +1: /* Ok, now that we've latched the error state, clear the + * sticky bits in the AFSR. + */ + stxa %g4, [%g0] ASI_AFSR + membar #Sync + + rdpr %tl, %g2 + cmp %g2, 1 + rdpr %pil, %g2 + bleu,pt %xcc, 1f + wrpr %g0, PIL_NORMAL_MAX, %pil + + ba,pt %xcc, etraptl1 + rd %pc, %g7 + + ba,a,pt %xcc, 2f + nop + +1: ba,pt %xcc, etrap_irq + rd %pc, %g7 + +2: +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + mov %l4, %o1 + mov %l5, %o2 + call spitfire_access_error + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __spitfire_access_error,.-__spitfire_access_error + + /* This is the trap handler entry point for ECC correctable + * errors. They are corrected, but we listen for the trap so + * that the event can be logged. + * + * Disrupting errors are either: + * 1) single-bit ECC errors during UDB reads to system + * memory + * 2) data parity errors during write-back events + * + * As far as I can make out from the manual, the CEE trap is + * only for correctable errors during memory read accesses by + * the front-end of the processor. 
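+	 * (Uncorrectable errors instead go through __spitfire_access_error
+	 * above, which disables all ESTATE error reporting while the error
+	 * is handled.)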
+ * + * The code below is only for trap level 1 CEE events, as it + * is the only situation where we can safely record and log. + * For trap level >1 we just clear the CE bit in the AFSR and + * return. + * + * This is just like __spitfire_access_error above, but it + * specifically handles correctable errors. If an + * uncorrectable error is indicated in the AFSR we will branch + * directly above to __spitfire_access_error to handle it + * instead. Uncorrectable therefore takes priority over + * correctable, and the error logging C code will notice this + * case by inspecting the trap type. + */ + .type __spitfire_cee_trap,#function +__spitfire_cee_trap: + ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR + mov 1, %g3 + sllx %g3, SFAFSR_UE_SHIFT, %g3 + andcc %g4, %g3, %g0 ! Check for UE + bne,pn %xcc, __spitfire_access_error + nop + + /* Ok, in this case we only have a correctable error. + * Indicate we only wish to capture that state in register + * %g1, and we only disable CE error reporting unlike UE + * handling which disables all errors. + */ + ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3 + andn %g3, ESTATE_ERR_CE, %g3 + stxa %g3, [%g0] ASI_ESTATE_ERROR_EN + membar #Sync + + /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */ + ba,pt %xcc, __spitfire_cee_trap_continue + mov UDBE_CE, %g1 + .size __spitfire_cee_trap,.-__spitfire_cee_trap + + .type __spitfire_data_access_exception_tl1,#function +__spitfire_data_access_exception_tl1: + rdpr %pstate, %g4 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate + mov TLB_SFSR, %g3 + mov DMMU_SFAR, %g5 + ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR + ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR + stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit + membar #Sync + rdpr %tt, %g3 + cmp %g3, 0x80 ! first win spill/fill trap + blu,pn %xcc, 1f + cmp %g3, 0xff ! last win spill/fill trap + bgu,pn %xcc, 1f + nop + ba,pt %xcc, winfix_dax + rdpr %tpc, %g3 +1: sethi %hi(109f), %g7 + ba,pt %xcc, etraptl1 +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call spitfire_data_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1 + + .type __spitfire_data_access_exception,#function +__spitfire_data_access_exception: + rdpr %pstate, %g4 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate + mov TLB_SFSR, %g3 + mov DMMU_SFAR, %g5 + ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR + ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR + stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit + membar #Sync + sethi %hi(109f), %g7 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call spitfire_data_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __spitfire_data_access_exception,.-__spitfire_data_access_exception + + .type __spitfire_insn_access_exception_tl1,#function +__spitfire_insn_access_exception_tl1: + rdpr %pstate, %g4 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate + mov TLB_SFSR, %g3 + ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR + rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC + stxa %g0, [%g3] ASI_IMMU !
Clear FaultValid bit + membar #Sync + sethi %hi(109f), %g7 + ba,pt %xcc, etraptl1 +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call spitfire_insn_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1 + + .type __spitfire_insn_access_exception,#function +__spitfire_insn_access_exception: + rdpr %pstate, %g4 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate + mov TLB_SFSR, %g3 + ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR + rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC + stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit + membar #Sync + sethi %hi(109f), %g7 + ba,pt %xcc, etrap +109: or %g7, %lo(109b), %g7 + mov %l4, %o1 + mov %l5, %o2 + call spitfire_insn_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c new file mode 100644 index 000000000..3bcc4ddc6 --- /dev/null +++ b/arch/sparc/kernel/sstate.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sstate.c: System soft state support. + * + * Copyright (C) 2007, 2008 David S. Miller + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "kernel.h" + +static int hv_supports_soft_state; + +static void do_set_sstate(unsigned long state, const char *msg) +{ + unsigned long err; + + if (!hv_supports_soft_state) + return; + + err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg)); + if (err) { + printk(KERN_WARNING "SSTATE: Failed to set soft-state to " + "state[%lx] msg[%s], err=%lu\n", + state, msg, err); + } +} + +static const char booting_msg[32] __attribute__((aligned(32))) = + "Linux booting"; +static const char running_msg[32] __attribute__((aligned(32))) = + "Linux running"; +static const char halting_msg[32] __attribute__((aligned(32))) = + "Linux halting"; +static const char poweroff_msg[32] __attribute__((aligned(32))) = + "Linux powering off"; +static const char rebooting_msg[32] __attribute__((aligned(32))) = + "Linux rebooting"; +static const char panicking_msg[32] __attribute__((aligned(32))) = + "Linux panicking"; + +static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) +{ + const char *msg; + + switch (type) { + case SYS_DOWN: + default: + msg = rebooting_msg; + break; + + case SYS_HALT: + msg = halting_msg; + break; + + case SYS_POWER_OFF: + msg = poweroff_msg; + break; + } + + do_set_sstate(HV_SOFT_STATE_TRANSITION, msg); + + return NOTIFY_OK; +} + +static struct notifier_block sstate_reboot_notifier = { + .notifier_call = sstate_reboot_call, +}; + +static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) +{ + do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg); + + return NOTIFY_DONE; +} + +static struct notifier_block sstate_panic_block = { + .notifier_call = sstate_panic_event, + .priority = INT_MAX, +}; + +static int __init sstate_init(void) +{ + unsigned long major, minor; + + if (tlb_type != hypervisor) + return 0; + + major = 1; + minor = 0; + if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor)) + return 0; + + hv_supports_soft_state = 1; + + prom_sun4v_guest_soft_state(); + + do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg); + + atomic_notifier_chain_register(&panic_notifier_list, + &sstate_panic_block); + register_reboot_notifier(&sstate_reboot_notifier); + + return 0; +} + +core_initcall(sstate_init); + 
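+/* Once boot has completed, a late initcall flips the soft state from
+ * "transition" to "normal" so the service processor reports the
+ * domain as running.
+ */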
+static int __init sstate_running(void) +{ + do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg); + return 0; +} + +late_initcall(sstate_running); diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c new file mode 100644 index 000000000..d8eb1d149 --- /dev/null +++ b/arch/sparc/kernel/stacktrace.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kstack.h" + +static void __save_stack_trace(struct thread_info *tp, + struct stack_trace *trace, + bool skip_sched) +{ + unsigned long ksp, fp; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + struct task_struct *t; + int graph = 0; +#endif + + if (tp == current_thread_info()) { + stack_trace_flush(); + __asm__ __volatile__("mov %%fp, %0" : "=r" (ksp)); + } else { + ksp = tp->ksp; + } + + fp = ksp + STACK_BIAS; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + t = tp->task; +#endif + do { + struct sparc_stackf *sf; + struct pt_regs *regs; + unsigned long pc; + + if (!kstack_valid(tp, fp)) + break; + + sf = (struct sparc_stackf *) fp; + regs = (struct pt_regs *) (sf + 1); + + if (kstack_is_trap_frame(tp, regs)) { + if (!(regs->tstate & TSTATE_PRIV)) + break; + pc = regs->tpc; + fp = regs->u_regs[UREG_I6] + STACK_BIAS; + } else { + pc = sf->callers_pc; + fp = (unsigned long)sf->fp + STACK_BIAS; + } + + if (trace->skip > 0) + trace->skip--; + else if (!skip_sched || !in_sched_functions(pc)) { + trace->entries[trace->nr_entries++] = pc; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + struct ftrace_ret_stack *ret_stack; + ret_stack = ftrace_graph_get_ret_stack(t, + graph); + if (ret_stack) { + pc = ret_stack->ret; + if (trace->nr_entries < + trace->max_entries) + trace->entries[trace->nr_entries++] = pc; + graph++; + } + } +#endif + } + } while (trace->nr_entries < trace->max_entries); +} + +void save_stack_trace(struct stack_trace *trace) +{ + __save_stack_trace(current_thread_info(), trace, false); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + struct thread_info *tp = task_thread_info(tsk); + + __save_stack_trace(tp, trace, true); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c new file mode 100644 index 000000000..b8cd57d91 --- /dev/null +++ b/arch/sparc/kernel/starfire.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * starfire.c: Starfire/E10000 support. + * + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + * Copyright (C) 2000 Anton Blanchard (anton@samba.org) + */ + +#include +#include + +#include +#include +#include +#include +#include + +/* + * A few places around the kernel check this to see if + * they need to call us to do things in a Starfire specific + * way. + */ +int this_is_starfire = 0; + +void check_if_starfire(void) +{ + phandle ssnode = prom_finddevice("/ssp-serial"); + if (ssnode != 0 && (s32)ssnode != -1) + this_is_starfire = 1; +} + +/* + * Each Starfire board has 32 registers which perform translation + * and delivery of traditional interrupt packets into the extended + * Starfire hardware format. Essentially UPAID's now have 2 more + * bits than in all previous Sun5 systems. + */ +struct starfire_irqinfo { + unsigned long imap_slots[32]; + unsigned long tregs[32]; + struct starfire_irqinfo *next; + int upaid, hwmid; +}; + +static struct starfire_irqinfo *sflist = NULL; + +/* Beam me up Scott(McNeil)y... 
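+ * Each board's translation registers live at a fixed physical offset
+ * derived from the hardware module id (hwmid), which is computed by
+ * shuffling the UPA id bits below.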
*/ +void starfire_hookup(int upaid) +{ + struct starfire_irqinfo *p; + unsigned long treg_base, hwmid, i; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + prom_printf("starfire_hookup: No memory, this is insane.\n"); + prom_halt(); + } + treg_base = 0x100fc000000UL; + hwmid = ((upaid & 0x3c) << 1) | + ((upaid & 0x40) >> 4) | + (upaid & 0x3); + p->hwmid = hwmid; + treg_base += (hwmid << 33UL); + treg_base += 0x200UL; + for (i = 0; i < 32; i++) { + p->imap_slots[i] = 0UL; + p->tregs[i] = treg_base + (i * 0x10UL); + /* Let's play it safe and not overwrite existing mappings */ + if (upa_readl(p->tregs[i]) != 0) + p->imap_slots[i] = 0xdeadbeaf; + } + p->upaid = upaid; + p->next = sflist; + sflist = p; +} + +unsigned int starfire_translate(unsigned long imap, + unsigned int upaid) +{ + struct starfire_irqinfo *p; + unsigned int bus_hwmid; + unsigned int i; + + bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f; + for (p = sflist; p != NULL; p = p->next) + if (p->hwmid == bus_hwmid) + break; + if (p == NULL) { + prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n", + ((unsigned long)imap)); + prom_halt(); + } + for (i = 0; i < 32; i++) { + if (p->imap_slots[i] == imap || + p->imap_slots[i] == 0UL) + break; + } + if (i == 32) { + printk("starfire_translate: Are you kidding me?\n"); + panic("Lucy in the sky...."); + } + p->imap_slots[i] = imap; + + /* map to real upaid */ + upaid = (((upaid & 0x3c) << 1) | + ((upaid & 0x40) >> 4) | + (upaid & 0x3)); + + upa_writel(upaid, p->tregs[i]); + + return i; +} diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c new file mode 100644 index 000000000..9a137c70e --- /dev/null +++ b/arch/sparc/kernel/sun4d_irq.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SS1000/SC2000 interrupt handling. + * + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Heavily based on arch/sparc/kernel/irq.c. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +/* Sun4d interrupts fall roughly into two categories: SBUS and + * cpu local. CPU local interrupts cover the timer interrupts + * and whatnot, and we encode those as normal PILs between + * 0 and 15. + * SBUS interrupts are encoded as a combination of board, level and slot. + */ + +struct sun4d_handler_data { + unsigned int cpuid; /* target cpu */ + unsigned int real_irq; /* interrupt level */ +}; + + +static unsigned int sun4d_encode_irq(int board, int lvl, int slot) +{ + return (board + 1) << 5 | (lvl << 2) | slot; +} + +struct sun4d_timer_regs { + u32 l10_timer_limit; + u32 l10_cur_countx; + u32 l10_limit_noclear; + u32 ctrl; + u32 l10_cur_count; +}; + +static struct sun4d_timer_regs __iomem *sun4d_timers; + +#define SUN4D_TIMER_IRQ 10 + +/* Specify which cpu handles interrupts from which board. + * Index is board - value is cpu. + */ +static unsigned char board_to_cpu[32]; + +static int pil_to_sbus[] = { + 0, + 0, + 1, + 2, + 0, + 3, + 0, + 4, + 0, + 5, + 0, + 6, + 0, + 7, + 0, + 0, +}; + +/* Exported for sun4d_smp.c */ +DEFINE_SPINLOCK(sun4d_imsk_lock); + +/* SBUS interrupts are encoded integers including the board number + * (plus one), the SBUS level, and the SBUS slot number. Sun4D + * IRQ dispatch is done by: + * + * 1) Reading the BW local interrupt table in order to get the bus + * interrupt mask. + * + * This table is indexed by SBUS interrupt level which can be + * derived from the PIL we got interrupted on.
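+ *    (The pil_to_sbus[] table above performs exactly that PIL to
+ *    SBUS level translation.)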
+ * + * 2) For each bus showing interrupt pending from #1, read the + * SBI interrupt state register. This will indicate which slots + * have interrupts pending for that SBUS interrupt level. + * + * 3) Call the generic IRQ support. + */ +static void sun4d_sbus_handler_irq(int sbusl) +{ + unsigned int bus_mask; + unsigned int sbino, slot; + unsigned int sbil; + + bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; + bw_clear_intr_mask(sbusl, bus_mask); + + sbil = (sbusl << 2); + /* Loop for each pending SBI */ + for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) { + unsigned int idx, mask; + + if (!(bus_mask & 1)) + continue; + /* XXX This seems to ACK the irq twice. acquire_sbi() + * XXX uses swap, therefore this writes 0xf << sbil, + * XXX then later release_sbi() will write the individual + * XXX bits which were set again. + */ + mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil); + mask &= (0xf << sbil); + + /* Loop for each pending SBI slot */ + slot = (1 << sbil); + for (idx = 0; mask != 0; idx++, slot <<= 1) { + unsigned int pil; + struct irq_bucket *p; + + if (!(mask & slot)) + continue; + + mask &= ~slot; + pil = sun4d_encode_irq(sbino, sbusl, idx); + + p = irq_map[pil]; + while (p) { + struct irq_bucket *next; + + next = p->next; + generic_handle_irq(p->irq); + p = next; + } + release_sbi(SBI2DEVID(sbino), slot); + } + } +} + +void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + /* SBUS IRQ level (1 - 7) */ + int sbusl = pil_to_sbus[pil]; + + /* FIXME: Is this necessary?? */ + cc_get_ipen(); + + cc_set_iclr(1 << pil); + +#ifdef CONFIG_SMP + /* + * Check IPI data structures after IRQ has been cleared. Hard and Soft + * IRQ can happen at the same time, so both cases are always handled. + */ + if (pil == SUN4D_IPI_IRQ) + sun4d_ipi_interrupt(); +#endif + + old_regs = set_irq_regs(regs); + irq_enter(); + if (sbusl == 0) { + /* cpu interrupt */ + struct irq_bucket *p; + + p = irq_map[pil]; + while (p) { + struct irq_bucket *next; + + next = p->next; + generic_handle_irq(p->irq); + p = next; + } + } else { + /* SBUS interrupt */ + sun4d_sbus_handler_irq(sbusl); + } + irq_exit(); + set_irq_regs(old_regs); +} + + +static void sun4d_mask_irq(struct irq_data *data) +{ + struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data); + unsigned int real_irq; +#ifdef CONFIG_SMP + int cpuid = handler_data->cpuid; + unsigned long flags; +#endif + real_irq = handler_data->real_irq; +#ifdef CONFIG_SMP + spin_lock_irqsave(&sun4d_imsk_lock, flags); + cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq)); + spin_unlock_irqrestore(&sun4d_imsk_lock, flags); +#else + cc_set_imsk(cc_get_imsk() | (1 << real_irq)); +#endif +} + +static void sun4d_unmask_irq(struct irq_data *data) +{ + struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data); + unsigned int real_irq; +#ifdef CONFIG_SMP + int cpuid = handler_data->cpuid; + unsigned long flags; +#endif + real_irq = handler_data->real_irq; + +#ifdef CONFIG_SMP + spin_lock_irqsave(&sun4d_imsk_lock, flags); + cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq)); + spin_unlock_irqrestore(&sun4d_imsk_lock, flags); +#else + cc_set_imsk(cc_get_imsk() & ~(1 << real_irq)); +#endif +} + +static unsigned int sun4d_startup_irq(struct irq_data *data) +{ + irq_link(data->irq); + sun4d_unmask_irq(data); + return 0; +} + +static void sun4d_shutdown_irq(struct irq_data *data) +{ + sun4d_mask_irq(data); + irq_unlink(data->irq); +} + +static struct irq_chip sun4d_irq = {
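+	/* startup/shutdown also link/unlink the handler into the
+	 * irq_map bucket chain via irq_link()/irq_unlink().
+	 */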
.name = "sun4d", + .irq_startup = sun4d_startup_irq, + .irq_shutdown = sun4d_shutdown_irq, + .irq_unmask = sun4d_unmask_irq, + .irq_mask = sun4d_mask_irq, +}; + +#ifdef CONFIG_SMP +/* Setup IRQ distribution scheme. */ +void __init sun4d_distribute_irqs(void) +{ + struct device_node *dp; + + int cpuid = cpu_logical_map(1); + + if (cpuid == -1) + cpuid = cpu_logical_map(0); + for_each_node_by_name(dp, "sbi") { + int devid = of_getintprop_default(dp, "device-id", 0); + int board = of_getintprop_default(dp, "board#", 0); + board_to_cpu[board] = cpuid; + set_sbi_tid(devid, cpuid << 3); + } + printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid); +} +#endif + +static void sun4d_clear_clock_irq(void) +{ + sbus_readl(&sun4d_timers->l10_timer_limit); +} + +static void sun4d_load_profile_irq(int cpu, unsigned int limit) +{ + unsigned int value = limit ? timer_value(limit) : 0; + bw_set_prof_limit(cpu, value); +} + +static void __init sun4d_load_profile_irqs(void) +{ + int cpu = 0, mid; + + while (!cpu_find_by_instance(cpu, NULL, &mid)) { + sun4d_load_profile_irq(mid >> 3, 0); + cpu++; + } +} + +static unsigned int _sun4d_build_device_irq(unsigned int real_irq, + unsigned int pil, + unsigned int board) +{ + struct sun4d_handler_data *handler_data; + unsigned int irq; + + irq = irq_alloc(real_irq, pil); + if (irq == 0) { + prom_printf("IRQ: allocate for %d %d %d failed\n", + real_irq, pil, board); + goto err_out; + } + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto err_out; + + handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n"); + prom_halt(); + } + handler_data->cpuid = board_to_cpu[board]; + handler_data->real_irq = real_irq; + irq_set_chip_and_handler_name(irq, &sun4d_irq, + handle_level_irq, "level"); + irq_set_handler_data(irq, handler_data); + +err_out: + return irq; +} + + + +static unsigned int sun4d_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + struct device_node *dp = op->dev.of_node; + struct device_node *board_parent, *bus = dp->parent; + char *bus_connection; + const struct linux_prom_registers *regs; + unsigned int pil; + unsigned int irq; + int board, slot; + int sbusl; + + irq = real_irq; + while (bus) { + if (of_node_name_eq(bus, "sbi")) { + bus_connection = "io-unit"; + break; + } + + if (of_node_name_eq(bus, "bootbus")) { + bus_connection = "cpu-unit"; + break; + } + + bus = bus->parent; + } + if (!bus) + goto err_out; + + regs = of_get_property(dp, "reg", NULL); + if (!regs) + goto err_out; + + slot = regs->which_io; + + /* + * If the bus node's parent is not io-unit/cpu-unit or the io-unit/cpu-unit + * lacks a "board#" property, something is very wrong.
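+	 * In that case we bail out and hand back real_irq unchanged.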
+ */ + if (!of_node_name_eq(bus->parent, bus_connection)) { + printk(KERN_ERR "%pOF: Error, parent is not %s.\n", + bus, bus_connection); + goto err_out; + } + board_parent = bus->parent; + board = of_getintprop_default(board_parent, "board#", -1); + if (board == -1) { + printk(KERN_ERR "%pOF: Error, lacks board# property.\n", + board_parent); + goto err_out; + } + + sbusl = pil_to_sbus[real_irq]; + if (sbusl) + pil = sun4d_encode_irq(board, sbusl, slot); + else + pil = real_irq; + + irq = _sun4d_build_device_irq(real_irq, pil, board); +err_out: + return irq; +} + +static unsigned int sun4d_build_timer_irq(unsigned int board, + unsigned int real_irq) +{ + return _sun4d_build_device_irq(real_irq, real_irq, board); +} + + +static void __init sun4d_fixup_trap_table(void) +{ +#ifdef CONFIG_SMP + unsigned long flags; + struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; + + /* Adjust so that we jump directly to smp4d_ticker */ + lvl14_save[2] += smp4d_ticker - real_irq_entry; + + /* For SMP we use the level 14 ticker, however the bootup code + * has copied the firmware's level 14 vector into the boot cpu's + * trap table, we must fix this now or we get squashed. + */ + local_irq_save(flags); + patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ + trap_table->inst_one = lvl14_save[0]; + trap_table->inst_two = lvl14_save[1]; + trap_table->inst_three = lvl14_save[2]; + trap_table->inst_four = lvl14_save[3]; + local_ops->cache_all(); + local_irq_restore(flags); +#endif +} + +static void __init sun4d_init_timers(void) +{ + struct device_node *dp; + struct resource res; + unsigned int irq; + const u32 *reg; + int err; + int board; + + dp = of_find_node_by_name(NULL, "cpu-unit"); + if (!dp) { + prom_printf("sun4d_init_timers: Unable to find cpu-unit\n"); + prom_halt(); + } + + /* Which cpu-unit we use is arbitrary, we can view the bootbus timer + * registers via any cpu's mapping. The first 'reg' property is the + * bootbus. 
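+	 * reg[0] carries the resource flags, reg[1] the start address and
+	 * reg[2] the limit, which is how the resource is unpacked below.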
+ */ + reg = of_get_property(dp, "reg", NULL); + if (!reg) { + prom_printf("sun4d_init_timers: No reg property\n"); + prom_halt(); + } + + board = of_getintprop_default(dp, "board#", -1); + if (board == -1) { + prom_printf("sun4d_init_timers: No board# property on cpu-unit\n"); + prom_halt(); + } + + of_node_put(dp); + + res.start = reg[1]; + res.end = reg[2] - 1; + res.flags = reg[0] & 0xff; + sun4d_timers = of_ioremap(&res, BW_TIMER_LIMIT, + sizeof(struct sun4d_timer_regs), "user timer"); + if (!sun4d_timers) { + prom_printf("sun4d_init_timers: Can't map timer regs\n"); + prom_halt(); + } + +#ifdef CONFIG_SMP + sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */ +#else + sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */ + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sbus_writel(timer_value(sparc_config.cs_period), + &sun4d_timers->l10_timer_limit); + + master_l10_counter = &sun4d_timers->l10_cur_count; + + irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); + if (err) { + prom_printf("sun4d_init_timers: request_irq() failed with %d\n", + err); + prom_halt(); + } + sun4d_load_profile_irqs(); + sun4d_fixup_trap_table(); +} + +void __init sun4d_init_sbi_irq(void) +{ + struct device_node *dp; + int target_cpu; + + target_cpu = boot_cpu_id; + for_each_node_by_name(dp, "sbi") { + int devid = of_getintprop_default(dp, "device-id", 0); + int board = of_getintprop_default(dp, "board#", 0); + unsigned int mask; + + set_sbi_tid(devid, target_cpu << 3); + board_to_cpu[board] = target_cpu; + + /* Get rid of pending irqs from PROM */ + mask = acquire_sbi(devid, 0xffffffff); + if (mask) { + printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n", + mask, board); + release_sbi(devid, mask); + } + } +} + +void __init sun4d_init_IRQ(void) +{ + local_irq_disable(); + + sparc_config.init_timers = sun4d_init_timers; + sparc_config.build_device_irq = sun4d_build_device_irq; + sparc_config.clock_rate = SBUS_CLOCK_RATE; + sparc_config.clear_clock_irq = sun4d_clear_clock_irq; + sparc_config.load_profile_irq = sun4d_load_profile_irq; + + /* Cannot enable interrupts until OBP ticker is disabled. */ +} diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c new file mode 100644 index 000000000..9a62a5cf3 --- /dev/null +++ b/arch/sparc/kernel/sun4d_smp.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Sparc SS1000/SC2000 SMP support. + * + * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * + * Based on sun4m's smp.c, which is: + * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +#define IRQ_CROSS_CALL 15 + +static volatile int smp_processors_ready; +static int smp_highest_cpu; + +static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val) +{ + __asm__ __volatile__("swap [%1], %0\n\t" : + "=&r" (val), "=&r" (ptr) : + "0" (val), "1" (ptr)); + return val; +} + +static void smp4d_ipi_init(void); + +static unsigned char cpu_leds[32]; + +static inline void show_leds(int cpuid) +{ + cpuid &= 0x1e; + __asm__ __volatile__ ("stba %0, [%1] %2" : : + "r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]), + "r" (ECSR_BASE(cpuid) | BB_LEDS), + "i" (ASI_M_CTL)); +} + +void sun4d_cpu_pre_starting(void *arg) +{ + int cpuid = hard_smp_processor_id(); + + /* Show we are alive */ + cpu_leds[cpuid] = 0x6; + show_leds(cpuid); + + /* Enable level15 interrupt, disable level14 interrupt for now */ + cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); +} + +void sun4d_cpu_pre_online(void *arg) +{ + unsigned long flags; + int cpuid; + + cpuid = hard_smp_processor_id(); + + /* Unblock the master CPU _only_ when the scheduler state + * of all secondary CPUs will be up-to-date, so after + * the SMP initialization the master will be just allowed + * to call the scheduler code. + */ + sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); + local_ops->cache_all(); + local_ops->tlb_all(); + + while ((unsigned long)current_set[cpuid] < PAGE_OFFSET) + barrier(); + + while (current_set[cpuid]->cpu != cpuid) + barrier(); + + /* Fix idle thread fields. */ + __asm__ __volatile__("ld [%0], %%g6\n\t" + : : "r" (&current_set[cpuid]) + : "memory" /* paranoid */); + + cpu_leds[cpuid] = 0x9; + show_leds(cpuid); + + /* Attach to the address space of init_task. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + + local_ops->cache_all(); + local_ops->tlb_all(); + + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) + barrier(); + + spin_lock_irqsave(&sun4d_imsk_lock, flags); + cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ + spin_unlock_irqrestore(&sun4d_imsk_lock, flags); +} + +/* + * Cycle through the processors asking the PROM to start each one. + */ +void __init smp4d_boot_cpus(void) +{ + smp4d_ipi_init(); + if (boot_cpu_id) + current_set[0] = NULL; + local_ops->cache_all(); +} + +int smp4d_boot_one_cpu(int i, struct task_struct *idle) +{ + unsigned long *entry = &sun4d_cpu_startup; + int timeout; + int cpu_node; + + cpu_find_by_instance(i, &cpu_node, NULL); + current_set[i] = task_thread_info(idle); + /* + * Initialize the contexts table + * Since the call to prom_startcpu() trashes the structure, + * we need to re-initialize it for each cpu + */ + smp_penguin_ctable.which_io = 0; + smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; + smp_penguin_ctable.reg_size = 0; + + /* whirrr, whirrr, whirrrrrrrrr... */ + printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); + local_ops->cache_all(); + prom_startcpu(cpu_node, + &smp_penguin_ctable, 0, (char *)entry); + + printk(KERN_INFO "prom_startcpu returned :)\n"); + + /* wheee... it's going...
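+	 * Poll for up to two seconds (10000 * 200us) waiting for the new
+	 * cpu to announce itself in cpu_callin_map.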
*/ + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(200); + } + + if (!(cpu_callin_map[i])) { + printk(KERN_ERR "Processor %d is stuck.\n", i); + return -ENODEV; + + } + local_ops->cache_all(); + return 0; +} + +void __init smp4d_smp_done(void) +{ + int i, first; + int *prev; + + /* setup cpu list for irq rotation */ + first = 0; + prev = &first; + for_each_online_cpu(i) { + *prev = i; + prev = &cpu_data(i).next; + } + *prev = first; + local_ops->cache_all(); + + /* Ok, they are spinning and ready to go. */ + smp_processors_ready = 1; + sun4d_distribute_irqs(); +} + +/* Memory structure giving interrupt handler information about IPI generated */ +struct sun4d_ipi_work { + int single; + int msk; + int resched; +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work); + +/* Initialize IPIs on the SUN4D SMP machine */ +static void __init smp4d_ipi_init(void) +{ + int cpu; + struct sun4d_ipi_work *work; + + printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ); + + for_each_possible_cpu(cpu) { + work = &per_cpu(sun4d_ipi_work, cpu); + work->single = work->msk = work->resched = 0; + } +} + +void sun4d_ipi_interrupt(void) +{ + struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work); + + if (work->single) { + work->single = 0; + smp_call_function_single_interrupt(); + } + if (work->msk) { + work->msk = 0; + smp_call_function_interrupt(); + } + if (work->resched) { + work->resched = 0; + smp_resched_interrupt(); + } +} + +/* +-------+-------------+-----------+------------------------------------+ + * | bcast | devid | sid | levels mask | + * +-------+-------------+-----------+------------------------------------+ + * 31 30 23 22 15 14 0 + */ +#define IGEN_MESSAGE(bcast, devid, sid, levels) \ + (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) + +static void sun4d_send_ipi(int cpu, int level) +{ + cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); +} + +static void sun4d_ipi_single(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->single = 1; + + /* Generate IRQ on the CPU */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + +static void sun4d_ipi_mask_one(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->msk = 1; + + /* Generate IRQ on the CPU */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + +static void sun4d_ipi_resched(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->resched = 1; + + /* Generate IRQ on the CPU (any IRQ will cause resched) */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + +static struct smp_funcall { + void *func; + unsigned long arg1; + unsigned long arg2; + unsigned long arg3; + unsigned long arg4; + unsigned long arg5; + unsigned char processors_in[NR_CPUS]; /* Set when ipi entered. */ + unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */ +} ccall_info __attribute__((aligned(8))); + +static DEFINE_SPINLOCK(cross_call_lock); + +/* Cross calls must be serialized, at least currently. */ +static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + if (smp_processors_ready) { + register int high = smp_highest_cpu; + unsigned long flags; + + spin_lock_irqsave(&cross_call_lock, flags); + + { + /* + * If you make changes here, make sure + * gcc generates proper code... 
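+			 * The register variables pin func and the arguments
+			 * to %i0-%i5 so the three std (store-double) pairs
+			 * below can copy them into ccall_info in one shot.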
+ */ + register void *f asm("i0") = func; + register unsigned long a1 asm("i1") = arg1; + register unsigned long a2 asm("i2") = arg2; + register unsigned long a3 asm("i3") = arg3; + register unsigned long a4 asm("i4") = arg4; + register unsigned long a5 asm("i5") = 0; + + __asm__ __volatile__( + "std %0, [%6]\n\t" + "std %2, [%6 + 8]\n\t" + "std %4, [%6 + 16]\n\t" : : + "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), + "r" (&ccall_info.func)); + } + + /* Init receive/complete mapping, plus fire the IPI's off. */ + { + register int i; + + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); + for (i = 0; i <= high; i++) { + if (cpumask_test_cpu(i, &mask)) { + ccall_info.processors_in[i] = 0; + ccall_info.processors_out[i] = 0; + sun4d_send_ipi(i, IRQ_CROSS_CALL); + } + } + } + + { + register int i; + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + while (!ccall_info.processors_in[i]) + barrier(); + } while (++i <= high); + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + while (!ccall_info.processors_out[i]) + barrier(); + } while (++i <= high); + } + + spin_unlock_irqrestore(&cross_call_lock, flags); + } +} + +/* Running cross calls. */ +void smp4d_cross_call_irq(void) +{ + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; + int i = hard_smp_processor_id(); + + ccall_info.processors_in[i] = 1; + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); + ccall_info.processors_out[i] = 1; +} + +void smp4d_percpu_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + int cpu = hard_smp_processor_id(); + struct clock_event_device *ce; + static int cpu_tick[NR_CPUS]; + static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd }; + + old_regs = set_irq_regs(regs); + bw_get_prof_limit(cpu); + bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */ + + cpu_tick[cpu]++; + if (!(cpu_tick[cpu] & 15)) { + if (cpu_tick[cpu] == 0x60) + cpu_tick[cpu] = 0; + cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4]; + show_leds(cpu); + } + + ce = &per_cpu(sparc32_clockevent, cpu); + + irq_enter(); + ce->event_handler(ce); + irq_exit(); + + set_irq_regs(old_regs); +} + +static const struct sparc32_ipi_ops sun4d_ipi_ops = { + .cross_call = sun4d_cross_call, + .resched = sun4d_ipi_resched, + .single = sun4d_ipi_single, + .mask_one = sun4d_ipi_mask_one, +}; + +void __init sun4d_init_smp(void) +{ + int i; + + /* Patch ipi15 trap table */ + t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m); + + sparc32_ipi_ops = &sun4d_ipi_ops; + + for (i = 0; i < NR_CPUS; i++) { + ccall_info.processors_in[i] = 1; + ccall_info.processors_out[i] = 1; + } +} diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c new file mode 100644 index 000000000..107963898 --- /dev/null +++ b/arch/sparc/kernel/sun4m_irq.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun4m irq support + * + * djhr: Hacked out of irq.c into a CPU dependent version. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1995 Pete A. 
Zaitcev (zaitcev@yahoo.com) + * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "irq.h" +#include "kernel.h" + +/* Sample sun4m IRQ layout: + * + * 0x22 - Power + * 0x24 - ESP SCSI + * 0x26 - Lance ethernet + * 0x2b - Floppy + * 0x2c - Zilog uart + * 0x32 - SBUS level 0 + * 0x33 - Parallel port, SBUS level 1 + * 0x35 - SBUS level 2 + * 0x37 - SBUS level 3 + * 0x39 - Audio, Graphics card, SBUS level 4 + * 0x3b - SBUS level 5 + * 0x3d - SBUS level 6 + * + * Each interrupt source has a mask bit in the interrupt registers. + * When the mask bit is set, this blocks interrupt delivery. So you + * clear the bit to enable the interrupt. + * + * Interrupts numbered less than 0x10 are software triggered interrupts + * and unused by Linux. + * + * Interrupt level assignment on sun4m: + * + * level source + * ------------------------------------------------------------ + * 1 softint-1 + * 2 softint-2, VME/SBUS level 1 + * 3 softint-3, VME/SBUS level 2 + * 4 softint-4, onboard SCSI + * 5 softint-5, VME/SBUS level 3 + * 6 softint-6, onboard ETHERNET + * 7 softint-7, VME/SBUS level 4 + * 8 softint-8, onboard VIDEO + * 9 softint-9, VME/SBUS level 5, Module Interrupt + * 10 softint-10, system counter/timer + * 11 softint-11, VME/SBUS level 6, Floppy + * 12 softint-12, Keyboard/Mouse, Serial + * 13 softint-13, VME/SBUS level 7, ISDN Audio + * 14 softint-14, per-processor counter/timer + * 15 softint-15, Asynchronous Errors (broadcast) + * + * Each interrupt source is masked distinctly in the sun4m interrupt + * registers. The PIL level alone is therefore ambiguous, since multiple + * interrupt sources map to a single PIL. + * + * This ambiguity is resolved in the 'intr' property for device nodes + * in the OF device tree. Each 'intr' property entry is composed of + * two 32-bit words. The first word is the IRQ priority value, which + * is what we're interested in. The second word is the IRQ vector, which + * is unused. + * + * The low 4 bits of the IRQ priority indicate the PIL, and the upper + * 4 bits indicate onboard vs. SBUS leveled vs. VME leveled. 0x20 + * means onboard, 0x30 means SBUS leveled, and 0x40 means VME leveled. + * + * For example, an 'intr' IRQ priority value of 0x24 is onboard SCSI + * whereas a value of 0x33 is SBUS level 2. Here are some sample + * 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and + * Tadpole S3 GX systems. + * + * esp: 0x24 onboard ESP SCSI + * le: 0x26 onboard Lance ETHERNET + * p9100: 0x32 SBUS level 1 P9100 video + * bpp: 0x33 SBUS level 2 BPP parallel port device + * DBRI: 0x39 SBUS level 5 DBRI ISDN audio + * SUNW,leo: 0x39 SBUS level 5 LEO video + * pcmcia: 0x3b SBUS level 6 PCMCIA controller + * uctrl: 0x3b SBUS level 6 UCTRL device + * modem: 0x3d SBUS level 7 MODEM + * zs: 0x2c onboard keyboard/mouse/serial + * floppy: 0x2b onboard Floppy + * power: 0x22 onboard power device (XXX unknown mask bit XXX) + */ + + +/* Code in entry.S needs to get at these register mappings. */ +struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; +struct sun4m_irq_global __iomem *sun4m_irq_global; + +struct sun4m_handler_data { + bool percpu; + long mask; +}; + +/* Dave Redman (djhr@tadpole.co.uk) + * The sun4m interrupt registers.
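+ * Each cpu has its own set/clear register pair and there is a global
+ * mask_set/mask_clear pair; the bit definitions below apply to all
+ * of them.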
+ */ +#define SUN4M_INT_ENABLE 0x80000000 +#define SUN4M_INT_E14 0x00000080 +#define SUN4M_INT_E10 0x00080000 + +#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */ +#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */ +#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */ +#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */ +#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */ +#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */ +#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */ +#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */ +#define SUN4M_INT_REALTIME 0x00080000 /* system timer */ +#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */ +#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */ +#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */ +#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */ +#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */ +#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */ +#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */ + +#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \ + SUN4M_INT_M2S_WRITE_ERR | \ + SUN4M_INT_ECC_ERR | \ + SUN4M_INT_VME_ERR) + +#define SUN4M_INT_SBUS(x) (1 << (x+7)) +#define SUN4M_INT_VME(x) (1 << (x)) + +/* Interrupt levels used by OBP */ +#define OBP_INT_LEVEL_SOFT 0x10 +#define OBP_INT_LEVEL_ONBOARD 0x20 +#define OBP_INT_LEVEL_SBUS 0x30 +#define OBP_INT_LEVEL_VME 0x40 + +#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10) +#define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14) + +static unsigned long sun4m_imask[0x50] = { + /* 0x00 - SMP */ + 0, SUN4M_SOFT_INT(1), + SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), + SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), + SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7), + SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9), + SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), + SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), + SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), + /* 0x10 - soft */ + 0, SUN4M_SOFT_INT(1), + SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), + SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), + SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7), + SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9), + SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), + SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), + SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), + /* 0x20 - onboard */ + 0, 0, 0, 0, + SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0, + SUN4M_INT_VIDEO, SUN4M_INT_MODULE, + SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY, + (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), + SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR, + /* 0x30 - sbus */ + 0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1), + 0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3), + 0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5), + 0, SUN4M_INT_SBUS(6), 0, 0, + /* 0x40 - vme */ + 0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1), + 0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3), + 0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5), + 0, SUN4M_INT_VME(6), 0, 0 +}; + +static void sun4m_mask_irq(struct irq_data *data) +{ + struct sun4m_handler_data *handler_data; + int cpu = smp_processor_id(); + + handler_data = irq_data_get_irq_handler_data(data); + if (handler_data->mask) { + unsigned long flags; + + local_irq_save(flags); + if (handler_data->percpu) { + sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set); + } else { + sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set); + } + local_irq_restore(flags); + } +} + +static void sun4m_unmask_irq(struct irq_data *data) +{ + struct sun4m_handler_data *handler_data; + int cpu = smp_processor_id(); + + handler_data = irq_data_get_irq_handler_data(data); + if 
(handler_data->mask) { + unsigned long flags; + + local_irq_save(flags); + if (handler_data->percpu) { + sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear); + } else { + sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear); + } + local_irq_restore(flags); + } +} + +static unsigned int sun4m_startup_irq(struct irq_data *data) +{ + irq_link(data->irq); + sun4m_unmask_irq(data); + return 0; +} + +static void sun4m_shutdown_irq(struct irq_data *data) +{ + sun4m_mask_irq(data); + irq_unlink(data->irq); +} + +static struct irq_chip sun4m_irq = { + .name = "sun4m", + .irq_startup = sun4m_startup_irq, + .irq_shutdown = sun4m_shutdown_irq, + .irq_mask = sun4m_mask_irq, + .irq_unmask = sun4m_unmask_irq, +}; + + +static unsigned int sun4m_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + struct sun4m_handler_data *handler_data; + unsigned int irq; + unsigned int pil; + + if (real_irq >= OBP_INT_LEVEL_VME) { + prom_printf("Bogus sun4m IRQ %u\n", real_irq); + prom_halt(); + } + pil = (real_irq & 0xf); + irq = irq_alloc(real_irq, pil); + + if (irq == 0) + goto out; + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto out; + + handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n"); + prom_halt(); + } + + handler_data->mask = sun4m_imask[real_irq]; + handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD; + irq_set_chip_and_handler_name(irq, &sun4m_irq, + handle_level_irq, "level"); + irq_set_handler_data(irq, handler_data); + +out: + return irq; +} + +struct sun4m_timer_percpu { + u32 l14_limit; + u32 l14_count; + u32 l14_limit_noclear; + u32 user_timer_start_stop; +}; + +static struct sun4m_timer_percpu __iomem *timers_percpu[SUN4M_NCPUS]; + +struct sun4m_timer_global { + u32 l10_limit; + u32 l10_count; + u32 l10_limit_noclear; + u32 reserved; + u32 timer_config; +}; + +static struct sun4m_timer_global __iomem *timers_global; + +static void sun4m_clear_clock_irq(void) +{ + sbus_readl(&timers_global->l10_limit); +} + +void sun4m_nmi(struct pt_regs *regs) +{ + unsigned long afsr, afar, si; + + printk(KERN_ERR "Aieee: sun4m NMI received!\n"); + /* XXX HyperSparc hack XXX */ + __asm__ __volatile__("mov 0x500, %%g1\n\t" + "lda [%%g1] 0x4, %0\n\t" + "mov 0x600, %%g1\n\t" + "lda [%%g1] 0x4, %1\n\t" : + "=r" (afsr), "=r" (afar)); + printk(KERN_ERR "afsr=%08lx afar=%08lx\n", afsr, afar); + si = sbus_readl(&sun4m_irq_global->pending); + printk(KERN_ERR "si=%08lx\n", si); + if (si & SUN4M_INT_MODULE_ERR) + printk(KERN_ERR "Module async error\n"); + if (si & SUN4M_INT_M2S_WRITE_ERR) + printk(KERN_ERR "MBus/SBus async error\n"); + if (si & SUN4M_INT_ECC_ERR) + printk(KERN_ERR "ECC memory error\n"); + if (si & SUN4M_INT_VME_ERR) + printk(KERN_ERR "VME async error\n"); + printk(KERN_ERR "you lose buddy boy...\n"); + show_regs(regs); + prom_halt(); +} + +void sun4m_unmask_profile_irq(void) +{ + unsigned long flags; + + local_irq_save(flags); + sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear); + local_irq_restore(flags); +} + +void sun4m_clear_profile_irq(int cpu) +{ + sbus_readl(&timers_percpu[cpu]->l14_limit); +} + +static void sun4m_load_profile_irq(int cpu, unsigned int limit) +{ + unsigned int value = limit ? 
timer_value(limit) : 0; + sbus_writel(value, &timers_percpu[cpu]->l14_limit); +} + +static void __init sun4m_init_timers(void) +{ + struct device_node *dp = of_find_node_by_name(NULL, "counter"); + int i, err, len, num_cpu_timers; + unsigned int irq; + const u32 *addr; + + if (!dp) { + printk(KERN_ERR "sun4m_init_timers: No 'counter' node.\n"); + return; + } + + addr = of_get_property(dp, "address", &len); + of_node_put(dp); + if (!addr) { + printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n"); + return; + } + + num_cpu_timers = (len / sizeof(u32)) - 1; + for (i = 0; i < num_cpu_timers; i++) { + timers_percpu[i] = (void __iomem *) + (unsigned long) addr[i]; + } + timers_global = (void __iomem *) + (unsigned long) addr[num_cpu_timers]; + + /* Every per-cpu timer works in timer mode */ + sbus_writel(0x00000000, &timers_global->timer_config); + +#ifdef CONFIG_SMP + sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */ + sparc_config.features |= FEAT_L14_ONESHOT; +#else + sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */ + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sbus_writel(timer_value(sparc_config.cs_period), + &timers_global->l10_limit); + + master_l10_counter = &timers_global->l10_count; + + irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ); + + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); + if (err) { + printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n", + err); + return; + } + + for (i = 0; i < num_cpu_timers; i++) + sbus_writel(0, &timers_percpu[i]->l14_limit); + if (num_cpu_timers == 4) + sbus_writel(SUN4M_INT_E14, &sun4m_irq_global->mask_set); + +#ifdef CONFIG_SMP + { + unsigned long flags; + struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; + + /* For SMP we use the level 14 ticker, however the bootup code + * has copied the firmware's level 14 vector into the boot cpu's + * trap table, we must fix this now or we get squashed. + */ + local_irq_save(flags); + trap_table->inst_one = lvl14_save[0]; + trap_table->inst_two = lvl14_save[1]; + trap_table->inst_three = lvl14_save[2]; + trap_table->inst_four = lvl14_save[3]; + local_ops->cache_all(); + local_irq_restore(flags); + } +#endif +} + +void __init sun4m_init_IRQ(void) +{ + struct device_node *dp = of_find_node_by_name(NULL, "interrupt"); + int len, i, mid, num_cpu_iregs; + const u32 *addr; + + if (!dp) { + printk(KERN_ERR "sun4m_init_IRQ: No 'interrupt' node.\n"); + return; + } + + addr = of_get_property(dp, "address", &len); + of_node_put(dp); + if (!addr) { + printk(KERN_ERR "sun4m_init_IRQ: No 'address' prop.\n"); + return; + } + + num_cpu_iregs = (len / sizeof(u32)) - 1; + for (i = 0; i < num_cpu_iregs; i++) { + sun4m_irq_percpu[i] = (void __iomem *) + (unsigned long) addr[i]; + } + sun4m_irq_global = (void __iomem *) + (unsigned long) addr[num_cpu_iregs]; + + local_irq_disable(); + + sbus_writel(~SUN4M_INT_MASKALL, &sun4m_irq_global->mask_set); + for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) + sbus_writel(~0x17fff, &sun4m_irq_percpu[mid]->clear); + + if (num_cpu_iregs == 4) + sbus_writel(0, &sun4m_irq_global->interrupt_target); + + sparc_config.init_timers = sun4m_init_timers; + sparc_config.build_device_irq = sun4m_build_device_irq; + sparc_config.clock_rate = SBUS_CLOCK_RATE; + sparc_config.clear_clock_irq = sun4m_clear_clock_irq; + sparc_config.load_profile_irq = sun4m_load_profile_irq; + + + /* Cannot enable interrupts until OBP ticker is disabled. 
*/ +} diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c new file mode 100644 index 000000000..056df034e --- /dev/null +++ b/arch/sparc/kernel/sun4m_smp.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun4m SMP support. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "irq.h" +#include "kernel.h" + +#define IRQ_IPI_SINGLE 12 +#define IRQ_IPI_MASK 13 +#define IRQ_IPI_RESCHED 14 +#define IRQ_CROSS_CALL 15 + +static inline unsigned long +swap_ulong(volatile unsigned long *ptr, unsigned long val) +{ + __asm__ __volatile__("swap [%1], %0\n\t" : + "=&r" (val), "=&r" (ptr) : + "0" (val), "1" (ptr)); + return val; +} + +void sun4m_cpu_pre_starting(void *arg) +{ +} + +void sun4m_cpu_pre_online(void *arg) +{ + int cpuid = hard_smp_processor_id(); + + /* Allow master to continue. The master will then give us the + * go-ahead by setting the smp_commenced_mask and will wait without + * timeouts until our setup is completed fully (signified by + * our bit being set in the cpu_online_mask). + */ + swap_ulong(&cpu_callin_map[cpuid], 1); + + /* XXX: What's up with all the flushes? */ + local_ops->cache_all(); + local_ops->tlb_all(); + + /* Fix idle thread fields. */ + __asm__ __volatile__("ld [%0], %%g6\n\t" + : : "r" (&current_set[cpuid]) + : "memory" /* paranoid */); + + /* Attach to the address space of init_task. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) + mb(); +} + +/* + * Cycle through the processors asking the PROM to start each one. + */ +void __init smp4m_boot_cpus(void) +{ + sun4m_unmask_profile_irq(); + local_ops->cache_all(); +} + +int smp4m_boot_one_cpu(int i, struct task_struct *idle) +{ + unsigned long *entry = &sun4m_cpu_startup; + int timeout; + int cpu_node; + + cpu_find_by_mid(i, &cpu_node); + current_set[i] = task_thread_info(idle); + + /* See trampoline.S for details... */ + entry += ((i - 1) * 3); + + /* + * Initialize the contexts table + * Since the call to prom_startcpu() trashes the structure, + * we need to re-initialize it for each cpu + */ + smp_penguin_ctable.which_io = 0; + smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; + smp_penguin_ctable.reg_size = 0; + + /* whirrr, whirrr, whirrrrrrrrr... */ + printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); + local_ops->cache_all(); + prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); + + /* wheee... it's going... */ + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(200); + } + + if (!(cpu_callin_map[i])) { + printk(KERN_ERR "Processor %d is stuck.\n", i); + return -ENODEV; + } + + local_ops->cache_all(); + return 0; +} + +void __init smp4m_smp_done(void) +{ + int i, first; + int *prev; + + /* setup cpu list for irq rotation */ + first = 0; + prev = &first; + for_each_online_cpu(i) { + *prev = i; + prev = &cpu_data(i).next; + } + *prev = first; + local_ops->cache_all(); + + /* Ok, they are spinning and ready to go.
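+	 * Unlike smp4d_smp_done() there is no extra IRQ distribution
+	 * step to perform here.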
*/ +} + +static void sun4m_send_ipi(int cpu, int level) +{ + sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set); +} + +static void sun4m_ipi_resched(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_RESCHED); +} + +static void sun4m_ipi_single(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_SINGLE); +} + +static void sun4m_ipi_mask_one(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_MASK); +} + +static struct smp_funcall { + void *func; + unsigned long arg1; + unsigned long arg2; + unsigned long arg3; + unsigned long arg4; + unsigned long arg5; + unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */ + unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */ +} ccall_info; + +static DEFINE_SPINLOCK(cross_call_lock); + +/* Cross calls must be serialized, at least currently. */ +static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + register int ncpus = SUN4M_NCPUS; + unsigned long flags; + + spin_lock_irqsave(&cross_call_lock, flags); + + /* Init function glue. */ + ccall_info.func = func; + ccall_info.arg1 = arg1; + ccall_info.arg2 = arg2; + ccall_info.arg3 = arg3; + ccall_info.arg4 = arg4; + ccall_info.arg5 = 0; + + /* Init receive/complete mapping, plus fire the IPI's off. */ + { + register int i; + + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); + for (i = 0; i < ncpus; i++) { + if (cpumask_test_cpu(i, &mask)) { + ccall_info.processors_in[i] = 0; + ccall_info.processors_out[i] = 0; + sun4m_send_ipi(i, IRQ_CROSS_CALL); + } else { + ccall_info.processors_in[i] = 1; + ccall_info.processors_out[i] = 1; + } + } + } + + { + register int i; + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + while (!ccall_info.processors_in[i]) + barrier(); + } while (++i < ncpus); + + i = 0; + do { + if (!cpumask_test_cpu(i, &mask)) + continue; + while (!ccall_info.processors_out[i]) + barrier(); + } while (++i < ncpus); + } + spin_unlock_irqrestore(&cross_call_lock, flags); +} + +/* Running cross calls. */ +void smp4m_cross_call_irq(void) +{ + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; + int i = smp_processor_id(); + + ccall_info.processors_in[i] = 1; + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); + ccall_info.processors_out[i] = 1; +} + +void smp4m_percpu_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + struct clock_event_device *ce; + int cpu = smp_processor_id(); + + old_regs = set_irq_regs(regs); + + ce = &per_cpu(sparc32_clockevent, cpu); + + if (clockevent_state_periodic(ce)) + sun4m_clear_profile_irq(cpu); + else + sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */ + + irq_enter(); + ce->event_handler(ce); + irq_exit(); + + set_irq_regs(old_regs); +} + +static const struct sparc32_ipi_ops sun4m_ipi_ops = { + .cross_call = sun4m_cross_call, + .resched = sun4m_ipi_resched, + .single = sun4m_ipi_single, + .mask_one = sun4m_ipi_mask_one, +}; + +void __init sun4m_init_smp(void) +{ + sparc32_ipi_ops = &sun4m_ipi_ops; +} diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S new file mode 100644 index 000000000..6478ef4f6 --- /dev/null +++ b/arch/sparc/kernel/sun4v_ivec.S @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* sun4v_ivec.S: Sun4v interrupt vector handling. 
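+ * (cpu mondo, device mondo, resumable and non-resumable error queues)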
+ * + * Copyright (C) 2006 + */ + +#include +#include +#include + + .text + .align 32 + +sun4v_cpu_mondo: + /* Head offset in %g2, tail offset in %g4. + * If they are the same, no work. + */ + mov INTRQ_CPU_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_CPU_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_cpu_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g4. */ + ldxa [%g0] ASI_SCRATCHPAD, %g4 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 + + /* Get smp_processor_id() into %g3 */ + sethi %hi(trap_block), %g5 + or %g5, %lo(trap_block), %g5 + sub %g4, %g5, %g3 + srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + + /* Increment cpu_mondo_counter[smp_processor_id()] */ + sethi %hi(cpu_mondo_counter), %g5 + or %g5, %lo(cpu_mondo_counter), %g5 + sllx %g3, 3, %g3 + add %g5, %g3, %g5 + ldx [%g5], %g3 + add %g3, 1, %g3 + stx %g3, [%g5] + + /* Get CPU mondo queue base phys address into %g7. */ + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 + + /* Now get the cross-call arguments and handler PC, same + * layout as sun4u: + * + * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it + * high half is context arg to MMU flushes, into %g5 + * 2nd 64-bit word: 64-bit arg, load into %g1 + * 3rd 64-bit word: 64-bit arg, load into %g7 + */ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3 + add %g2, 0x8, %g2 + srlx %g3, 32, %g5 + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + add %g2, 0x8, %g2 + srl %g3, 0, %g3 + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7 + add %g2, 0x40 - 0x8 - 0x8, %g2 + + /* Update queue head pointer. */ + lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4 + and %g2, %g4, %g2 + + mov INTRQ_CPU_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + jmpl %g3, %g0 + nop + +sun4v_cpu_mondo_queue_empty: + retry + +sun4v_dev_mondo: + /* Head offset in %g2, tail offset in %g4. */ + mov INTRQ_DEVICE_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_DEVICE_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_dev_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g4. */ + ldxa [%g0] ASI_SCRATCHPAD, %g4 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 + + /* Get DEV mondo queue base phys address into %g5. */ + ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 + + /* Load IVEC into %g3. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + add %g2, 0x40, %g2 + + /* XXX There can be a full 64-byte block of data here. + * XXX This is how we can get at MSI vector data. + * XXX Current we do not capture this, but when we do we'll + * XXX need to add a 64-byte storage area in the struct ino_bucket + * XXX or the struct irq_desc. + */ + + /* Update queue head pointer, this frees up some registers. */ + lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4 + and %g2, %g4, %g2 + + mov INTRQ_DEVICE_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + TRAP_LOAD_IRQ_WORK_PA(%g1, %g4) + + /* For VIRQs, cookie is encoded as ~bucket_phys_addr */ + brlz,pt %g3, 1f + xnor %g3, %g0, %g4 + + /* Get __pa(&ivector_table[IVEC]) into %g4. */ + sethi %hi(ivector_table_pa), %g4 + ldx [%g4 + %lo(ivector_table_pa)], %g4 + sllx %g3, 4, %g3 + add %g4, %g3, %g4 + +1: ldx [%g1], %g2 + stxa %g2, [%g4] ASI_PHYS_USE_EC + stx %g4, [%g1] + + /* Signal the interrupt by setting (1 << pil) in %softint. */ + wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint + +sun4v_dev_mondo_queue_empty: + retry + +sun4v_res_mondo: + /* Head offset in %g2, tail offset in %g4. 
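+	 * As with the other queues, equal offsets mean the queue is
+	 * empty; sketched in C-like form (register names follow the
+	 * code below):
+	 *
+	 *	g2 = head; g4 = tail;
+	 *	if (g2 == g4)
+	 *		retry;		(nothing pending)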
*/ + mov INTRQ_RESUM_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_RESUM_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_res_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 + + /* Get RES mondo queue base phys address into %g5. */ + ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5 + + /* Get RES kernel buffer base phys address into %g7. */ + ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7 + + /* If the first word is non-zero, queue is full. */ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + brnz,pn %g1, sun4v_res_mondo_queue_full + nop + + lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4 + + /* Remember this entry's offset in %g1. */ + mov %g2, %g1 + + /* Copy 64-byte queue entry into kernel buffer. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + + /* Update queue head pointer. */ + and %g2, %g4, %g2 + + mov INTRQ_RESUM_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + /* Disable interrupts and save register state so we can call + * C code. The etrap handling will leave %g4 in %l4 for us + * when it's done. + */ + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + mov %g1, %g4 + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + /* Log the event. */ + add %sp, PTREGS_OFF, %o0 + call sun4v_resum_error + mov %l4, %o1 + + /* Return from trap. */ + ba,pt %xcc, rtrap_irq + nop + +sun4v_res_mondo_queue_empty: + retry + +sun4v_res_mondo_queue_full: + /* The queue is full, consolidate our damage by setting + * the head equal to the tail. We'll just trap again otherwise. + * Call C code to log the event. + */ + mov INTRQ_RESUM_MONDO_HEAD, %g2 + stxa %g4, [%g2] ASI_QUEUE + membar #Sync + + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + call sun4v_resum_overflow + add %sp, PTREGS_OFF, %o0 + + ba,pt %xcc, rtrap_irq + nop + +sun4v_nonres_mondo: + /* Head offset in %g2, tail offset in %g4. */ + mov INTRQ_NONRESUM_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_NONRESUM_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_nonres_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 + + /* Get RES mondo queue base phys address into %g5. */ + ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5 + + /* Get RES kernel buffer base phys address into %g7. */ + ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7 + + /* If the first word is non-zero, queue is full. 
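+	 * That is, a kernel-buffer slot whose first 64-bit word is still
+	 * non-zero has not yet been consumed by the C handler.
+	 * Illustrative form of the check (using the buffer base and head
+	 * offset loaded below):
+	 *
+	 *	if (*(u64 *)(kbuf_pa + head))	(first dword of the slot)
+	 *		goto queue_full;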
*/ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + brnz,pn %g1, sun4v_nonres_mondo_queue_full + nop + + lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4 + + /* Remember this entry's offset in %g1. */ + mov %g2, %g1 + + /* Copy 64-byte queue entry into kernel buffer. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + + /* Update queue head pointer. */ + and %g2, %g4, %g2 + + mov INTRQ_NONRESUM_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + /* Disable interrupts and save register state so we can call + * C code. The etrap handling will leave %g4 in %l4 for us + * when it's done. + */ + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + mov %g1, %g4 + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + /* Log the event. */ + add %sp, PTREGS_OFF, %o0 + call sun4v_nonresum_error + mov %l4, %o1 + + /* Return from trap. */ + ba,pt %xcc, rtrap_irq + nop + +sun4v_nonres_mondo_queue_empty: + retry + +sun4v_nonres_mondo_queue_full: + /* The queue is full, consolidate our damage by setting + * the head equal to the tail. We'll just trap again otherwise. + * Call C code to log the event. + */ + mov INTRQ_NONRESUM_MONDO_HEAD, %g2 + stxa %g4, [%g2] ASI_QUEUE + membar #Sync + + rdpr %pil, %g2 + wrpr %g0, PIL_NORMAL_MAX, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 +#ifdef CONFIG_TRACE_IRQFLAGS + call trace_hardirqs_off + nop +#endif + call sun4v_nonresum_overflow + add %sp, PTREGS_OFF, %o0 + + ba,pt %xcc, rtrap_irq + nop diff --git a/arch/sparc/kernel/sun4v_mcd.S b/arch/sparc/kernel/sun4v_mcd.S new file mode 100644 index 000000000..a419b7318 --- /dev/null +++ b/arch/sparc/kernel/sun4v_mcd.S @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* sun4v_mcd.S: Sun4v memory corruption detected precise exception handler + * + * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved. + * Authors: Bob Picco , + * Khalid Aziz + */ + .text + .align 32 + +sun4v_mcd_detect_precise: + mov %l4, %o1 + mov %l5, %o2 + call sun4v_mem_corrupt_detect_precise + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + nop diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S new file mode 100644 index 000000000..7ac9f3367 --- /dev/null +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -0,0 +1,437 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* sun4v_tlb_miss.S: Sun4v TLB miss handlers. + * + * Copyright (C) 2006 + */ + + .text + .align 32 + + /* Load ITLB fault information into VADDR and CTX, using BASE. */ +#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \ + ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \ + ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX; + + /* Load DTLB fault information into VADDR and CTX, using BASE. 
*/ +#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \ + ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \ + ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX; + + /* DEST = (VADDR >> 22) + * + * Branch to ZERO_CTX_LABEL if context is zero. + */ +#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \ + srlx VADDR, 22, DEST; \ + brz,pn CTX, ZERO_CTX_LABEL; \ + nop; + + /* Create TSB pointer. This is something like: + * + * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; + * tsb_base = tsb_reg & ~0x7UL; + * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask); + * tsb_ptr = tsb_base + (tsb_index * 16); + */ +#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \ + and TSB_PTR, 0x7, TMP1; \ + mov 512, TMP2; \ + andn TSB_PTR, 0x7, TSB_PTR; \ + sllx TMP2, TMP1, TMP2; \ + srlx VADDR, HASH_SHIFT, TMP1; \ + sub TMP2, 1, TMP2; \ + and TMP1, TMP2, TMP1; \ + sllx TMP1, 4, TMP1; \ + add TSB_PTR, TMP1, TSB_PTR; + +sun4v_itlb_miss: + /* Load MMU Miss base into %g2. */ + ldxa [%g0] ASI_SCRATCHPAD, %g2 + + /* Load UTSB reg into %g1. */ + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + + LOAD_ITLB_INFO(%g2, %g4, %g5) + COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v) + COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7) + + /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ + ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 + cmp %g2, %g6 + bne,a,pn %xcc, tsb_miss_page_table_walk + mov FAULT_CODE_ITLB, %g3 + andcc %g3, _PAGE_EXEC_4V, %g0 + be,a,pn %xcc, tsb_do_fault + mov FAULT_CODE_ITLB, %g3 + + /* We have a valid entry, make hypervisor call to load + * I-TLB and return from trap. + * + * %g3: PTE + * %g4: vaddr + */ +sun4v_itlb_load: + ldxa [%g0] ASI_SCRATCHPAD, %g6 + mov %o0, %g1 ! save %o0 + mov %o1, %g2 ! save %o1 + mov %o2, %g5 ! save %o2 + mov %o3, %g7 ! save %o3 + mov %g4, %o0 ! vaddr + ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx + mov %g3, %o2 ! PTE + mov HV_MMU_IMMU, %o3 ! flags + ta HV_MMU_MAP_ADDR_TRAP + brnz,pn %o0, sun4v_itlb_error + mov %g2, %o1 ! restore %o1 + mov %g1, %o0 ! restore %o0 + mov %g5, %o2 ! restore %o2 + mov %g7, %o3 ! restore %o3 + + retry + +sun4v_dtlb_miss: + /* Load MMU Miss base into %g2. */ + ldxa [%g0] ASI_SCRATCHPAD, %g2 + + /* Load UTSB reg into %g1. */ + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + + LOAD_DTLB_INFO(%g2, %g4, %g5) + COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v) + COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7) + + /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ + ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 + cmp %g2, %g6 + bne,a,pn %xcc, tsb_miss_page_table_walk + mov FAULT_CODE_DTLB, %g3 + + /* We have a valid entry, make hypervisor call to load + * D-TLB and return from trap. + * + * %g3: PTE + * %g4: vaddr + */ +sun4v_dtlb_load: + ldxa [%g0] ASI_SCRATCHPAD, %g6 + mov %o0, %g1 ! save %o0 + mov %o1, %g2 ! save %o1 + mov %o2, %g5 ! save %o2 + mov %o3, %g7 ! save %o3 + mov %g4, %o0 ! vaddr + ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx + mov %g3, %o2 ! PTE + mov HV_MMU_DMMU, %o3 ! flags + ta HV_MMU_MAP_ADDR_TRAP + brnz,pn %o0, sun4v_dtlb_error + mov %g2, %o1 ! restore %o1 + mov %g1, %o0 ! restore %o0 + mov %g5, %o2 ! restore %o2 + mov %g7, %o3 ! restore %o3 + + retry + +sun4v_dtlb_prot: + SET_GL(1) + + /* Load MMU Miss base into %g5. 
*/ + ldxa [%g0] ASI_SCRATCHPAD, %g5 + + ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 + rdpr %tl, %g1 + cmp %g1, 1 + bgu,pn %xcc, winfix_trampoline + mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 + ba,pt %xcc, sparc64_realfault_common + nop + + /* Called from trap table: + * %g4: vaddr + * %g5: context + * %g6: TAG TARGET + */ +sun4v_itsb_miss: + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + brz,pn %g5, kvmap_itlb_4v + mov FAULT_CODE_ITLB, %g3 + ba,a,pt %xcc, sun4v_tsb_miss_common + + /* Called from trap table: + * %g4: vaddr + * %g5: context + * %g6: TAG TARGET + */ +sun4v_dtsb_miss: + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + brz,pn %g5, kvmap_dtlb_4v + mov FAULT_CODE_DTLB, %g3 + + /* fallthrough */ + +sun4v_tsb_miss_common: + COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7) + + sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + mov SCRATCHPAD_UTSBREG2, %g5 + ldxa [%g5] ASI_SCRATCHPAD, %g5 + cmp %g5, -1 + be,pt %xcc, 80f + nop + COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7) + + /* That clobbered %g2, reload it. */ + ldxa [%g0] ASI_SCRATCHPAD, %g2 + sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + +80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP] +#endif + + ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath + ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 + +sun4v_itlb_error: + rdpr %tl, %g1 + cmp %g1, 1 + ble,pt %icc, sun4v_bad_ra + or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1 + + sethi %hi(sun4v_err_itlb_vaddr), %g1 + stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] + sethi %hi(sun4v_err_itlb_ctx), %g1 + ldxa [%g0] ASI_SCRATCHPAD, %g6 + ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 + stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)] + sethi %hi(sun4v_err_itlb_pte), %g1 + stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)] + sethi %hi(sun4v_err_itlb_error), %g1 + stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] + + sethi %hi(1f), %g7 + rdpr %tl, %g4 + ba,pt %xcc, etraptl1 +1: or %g7, %lo(1f), %g7 + mov %l4, %o1 + call sun4v_itlb_error_report + add %sp, PTREGS_OFF, %o0 + + /* NOTREACHED */ + +sun4v_dtlb_error: + rdpr %tl, %g1 + cmp %g1, 1 + ble,pt %icc, sun4v_bad_ra + or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1 + + sethi %hi(sun4v_err_dtlb_vaddr), %g1 + stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] + sethi %hi(sun4v_err_dtlb_ctx), %g1 + ldxa [%g0] ASI_SCRATCHPAD, %g6 + ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 + stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)] + sethi %hi(sun4v_err_dtlb_pte), %g1 + stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)] + sethi %hi(sun4v_err_dtlb_error), %g1 + stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] + + sethi %hi(1f), %g7 + rdpr %tl, %g4 + ba,pt %xcc, etraptl1 +1: or %g7, %lo(1f), %g7 + mov %l4, %o1 + call sun4v_dtlb_error_report + add %sp, PTREGS_OFF, %o0 + + /* NOTREACHED */ + +sun4v_bad_ra: + or %g0, %g4, %g5 + ba,pt %xcc, sparc64_realfault_common + or %g1, %g0, %g4 + + /* NOTREACHED */ + + /* Instruction Access Exception, tl0. */ +sun4v_iacc: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_insn_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Instruction Access Exception, tl1. 
*/ +sun4v_iacc_tl1: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etraptl1 + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_insn_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Data Access Exception, tl0. */ +sun4v_dacc: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_data_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Data Access Exception, tl1. */ +sun4v_dacc_tl1: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etraptl1 + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_data_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Memory Address Unaligned. */ +sun4v_mna: + /* Window fixup? */ + rdpr %tl, %g2 + cmp %g2, 1 + ble,pt %icc, 1f + nop + + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 + mov HV_FAULT_TYPE_UNALIGNED, %g3 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4 + sllx %g3, 16, %g3 + or %g4, %g3, %g4 + ba,pt %xcc, winfix_mna + rdpr %tpc, %g3 + /* not reached */ + +1: ldxa [%g0] ASI_SCRATCHPAD, %g2 + mov HV_FAULT_TYPE_UNALIGNED, %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_do_mna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + nop + + /* Privileged Action. */ +sun4v_privact: + ba,pt %xcc, etrap + rd %pc, %g7 + call do_privact + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Unaligned ldd float, tl0. */ +sun4v_lddfmna: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_lddfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Unaligned std float, tl0. 
*/ +sun4v_stdfmna: + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_stdfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define SUN4V_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl sun4v_patch_tlb_handlers + .type sun4v_patch_tlb_handlers,#function +sun4v_patch_tlb_handlers: + SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss) + SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss) + SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss) + SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss) + SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot) + SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot) + SUN4V_DO_PATCH(tl0_iax, sun4v_iacc) + SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1) + SUN4V_DO_PATCH(tl0_dax, sun4v_dacc) + SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1) + SUN4V_DO_PATCH(tl0_mna, sun4v_mna) + SUN4V_DO_PATCH(tl1_mna, sun4v_mna) + SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna) + SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna) + SUN4V_DO_PATCH(tl0_privact, sun4v_privact) + retl + nop + .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S new file mode 100644 index 000000000..a45f0f31f --- /dev/null +++ b/arch/sparc/kernel/sys32.S @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sys32.S: I-cache tricks for 32-bit compatibility layer simple + * conversions. + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include + +/* NOTE: call as jump breaks return stack, we have to avoid that */ + + .text + + .globl sys32_mmap2 +sys32_mmap2: + sethi %hi(sys_mmap), %g1 + jmpl %g1 + %lo(sys_mmap), %g0 + sllx %o5, 12, %o5 + + .align 32 + .globl sys32_socketcall +sys32_socketcall: /* %o0=call, %o1=args */ + cmp %o0, 1 + bl,pn %xcc, do_einval + cmp %o0, 18 + bg,pn %xcc, do_einval + sub %o0, 1, %o0 + sllx %o0, 5, %o0 + sethi %hi(__socketcall_table_begin), %g2 + or %g2, %lo(__socketcall_table_begin), %g2 + jmpl %g2 + %o0, %g0 + nop +do_einval: + retl + mov -EINVAL, %o0 + + .align 32 +__socketcall_table_begin: + + /* Each entry is exactly 32 bytes. 
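+	 * so sys32_socketcall above can dispatch with shift-and-jump
+	 * rather than a call-table lookup. Illustrative arithmetic
+	 * (entry numbering follows the stubs below):
+	 *
+	 *	entry = __socketcall_table_begin + (call - 1) * 32;
+	 *
+	 * e.g. call == 2 lands at offset 32, i.e. do_sys_bind; each stub
+	 * is padded with nops to exactly eight 4-byte instructions.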
*/ +do_sys_socket: /* sys_socket(int, int, int) */ +1: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_socket), %g1 +2: ldswa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_socket), %g0 +3: ldswa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ +4: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_bind), %g1 +5: ldswa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_bind), %g0 +6: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ +7: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_connect), %g1 +8: ldswa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_connect), %g0 +9: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_listen: /* sys_listen(int, int) */ +10: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_listen), %g1 + jmpl %g1 + %lo(sys_listen), %g0 +11: ldswa [%o1 + 0x4] %asi, %o1 + nop + nop + nop + nop +do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ +12: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_accept), %g1 +13: lduwa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_accept), %g0 +14: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ +15: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_getsockname), %g1 +16: lduwa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_getsockname), %g0 +17: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ +18: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_getpeername), %g1 +19: lduwa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(sys_getpeername), %g0 +20: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ +21: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_socketpair), %g1 +22: ldswa [%o1 + 0x8] %asi, %o2 +23: lduwa [%o1 + 0xc] %asi, %o3 + jmpl %g1 + %lo(sys_socketpair), %g0 +24: ldswa [%o1 + 0x4] %asi, %o1 + nop + nop +do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ +25: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_send), %g1 +26: lduwa [%o1 + 0x8] %asi, %o2 +27: lduwa [%o1 + 0xc] %asi, %o3 + jmpl %g1 + %lo(sys_send), %g0 +28: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop +do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ +29: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_recv), %g1 +30: lduwa [%o1 + 0x8] %asi, %o2 +31: lduwa [%o1 + 0xc] %asi, %o3 + jmpl %g1 + %lo(sys_recv), %g0 +32: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop +do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ +33: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_sendto), %g1 +34: lduwa [%o1 + 0x8] %asi, %o2 +35: lduwa [%o1 + 0xc] %asi, %o3 +36: lduwa [%o1 + 0x10] %asi, %o4 +37: ldswa [%o1 + 0x14] %asi, %o5 + jmpl %g1 + %lo(sys_sendto), %g0 +38: lduwa [%o1 + 0x4] %asi, %o1 +do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ +39: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_recvfrom), %g1 +40: lduwa [%o1 + 0x8] %asi, %o2 +41: lduwa [%o1 + 0xc] %asi, %o3 +42: lduwa [%o1 + 0x10] %asi, %o4 +43: lduwa [%o1 + 0x14] %asi, %o5 + jmpl %g1 + %lo(sys_recvfrom), %g0 +44: lduwa [%o1 + 0x4] %asi, %o1 +do_sys_shutdown: /* sys_shutdown(int, int) */ +45: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_shutdown), %g1 + jmpl %g1 + %lo(sys_shutdown), %g0 +46: ldswa [%o1 + 0x4] %asi, %o1 + nop + nop + nop + nop +do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */ +47: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_setsockopt), %g1 +48: ldswa [%o1 + 0x8] %asi, %o2 
+49: lduwa [%o1 + 0xc] %asi, %o3 +50: ldswa [%o1 + 0x10] %asi, %o4 + jmpl %g1 + %lo(sys_setsockopt), %g0 +51: ldswa [%o1 + 0x4] %asi, %o1 + nop +do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */ +52: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_getsockopt), %g1 +53: ldswa [%o1 + 0x8] %asi, %o2 +54: lduwa [%o1 + 0xc] %asi, %o3 +55: lduwa [%o1 + 0x10] %asi, %o4 + jmpl %g1 + %lo(sys_getsockopt), %g0 +56: ldswa [%o1 + 0x4] %asi, %o1 + nop +do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ +57: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(compat_sys_sendmsg), %g1 +58: lduwa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(compat_sys_sendmsg), %g0 +59: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ +60: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(compat_sys_recvmsg), %g1 +61: lduwa [%o1 + 0x8] %asi, %o2 + jmpl %g1 + %lo(compat_sys_recvmsg), %g0 +62: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + nop +do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ +63: ldswa [%o1 + 0x0] %asi, %o0 + sethi %hi(sys_accept4), %g1 +64: lduwa [%o1 + 0x8] %asi, %o2 +65: ldswa [%o1 + 0xc] %asi, %o3 + jmpl %g1 + %lo(sys_accept4), %g0 +66: lduwa [%o1 + 0x4] %asi, %o1 + nop + nop + + .section __ex_table,"a" + .align 4 + .word 1b, __retl_efault, 2b, __retl_efault + .word 3b, __retl_efault, 4b, __retl_efault + .word 5b, __retl_efault, 6b, __retl_efault + .word 7b, __retl_efault, 8b, __retl_efault + .word 9b, __retl_efault, 10b, __retl_efault + .word 11b, __retl_efault, 12b, __retl_efault + .word 13b, __retl_efault, 14b, __retl_efault + .word 15b, __retl_efault, 16b, __retl_efault + .word 17b, __retl_efault, 18b, __retl_efault + .word 19b, __retl_efault, 20b, __retl_efault + .word 21b, __retl_efault, 22b, __retl_efault + .word 23b, __retl_efault, 24b, __retl_efault + .word 25b, __retl_efault, 26b, __retl_efault + .word 27b, __retl_efault, 28b, __retl_efault + .word 29b, __retl_efault, 30b, __retl_efault + .word 31b, __retl_efault, 32b, __retl_efault + .word 33b, __retl_efault, 34b, __retl_efault + .word 35b, __retl_efault, 36b, __retl_efault + .word 37b, __retl_efault, 38b, __retl_efault + .word 39b, __retl_efault, 40b, __retl_efault + .word 41b, __retl_efault, 42b, __retl_efault + .word 43b, __retl_efault, 44b, __retl_efault + .word 45b, __retl_efault, 46b, __retl_efault + .word 47b, __retl_efault, 48b, __retl_efault + .word 49b, __retl_efault, 50b, __retl_efault + .word 51b, __retl_efault, 52b, __retl_efault + .word 53b, __retl_efault, 54b, __retl_efault + .word 55b, __retl_efault, 56b, __retl_efault + .word 57b, __retl_efault, 58b, __retl_efault + .word 59b, __retl_efault, 60b, __retl_efault + .word 61b, __retl_efault, 62b, __retl_efault + .word 63b, __retl_efault, 64b, __retl_efault + .word 65b, __retl_efault, 66b, __retl_efault + .previous diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c new file mode 100644 index 000000000..f84a02ab6 --- /dev/null +++ b/arch/sparc/kernel/sys_sparc32.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls. + * + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net) + * + * These routines maintain argument size conversion between 32bit and 64bit + * environment. 
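+ *
+ * The recurring pattern below reassembles a 64-bit quantity that
+ * compat user space passes as two 32-bit arguments:
+ *
+ *	((u64)high << 32) | low
+ *
+ * e.g. high = 0x1, low = 0x80000000 yields the offset 0x180000000.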
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "systbls.h" + +COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, path, u32, high, u32, low) +{ + return ksys_truncate(path, ((u64)high << 32) | low); +} + +COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, u32, high, u32, low) +{ + return ksys_ftruncate(fd, ((u64)high << 32) | low); +} + +static int cp_compat_stat64(struct kstat *stat, + struct compat_stat64 __user *statbuf) +{ + int err; + + err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev); + err |= put_user(stat->ino, &statbuf->st_ino); + err |= put_user(stat->mode, &statbuf->st_mode); + err |= put_user(stat->nlink, &statbuf->st_nlink); + err |= put_user(from_kuid_munged(current_user_ns(), stat->uid), &statbuf->st_uid); + err |= put_user(from_kgid_munged(current_user_ns(), stat->gid), &statbuf->st_gid); + err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev); + err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]); + err |= put_user(stat->size, &statbuf->st_size); + err |= put_user(stat->blksize, &statbuf->st_blksize); + err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]); + err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[4]); + err |= put_user(stat->blocks, &statbuf->st_blocks); + err |= put_user(stat->atime.tv_sec, &statbuf->st_atime); + err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec); + err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime); + err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec); + err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime); + err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec); + err |= put_user(0, &statbuf->__unused4); + err |= put_user(0, &statbuf->__unused5); + + return err; +} + +COMPAT_SYSCALL_DEFINE2(stat64, const char __user *, filename, + struct compat_stat64 __user *, statbuf) +{ + struct kstat stat; + int error = vfs_stat(filename, &stat); + + if (!error) + error = cp_compat_stat64(&stat, statbuf); + return error; +} + +COMPAT_SYSCALL_DEFINE2(lstat64, const char __user *, filename, + struct compat_stat64 __user *, statbuf) +{ + struct kstat stat; + int error = vfs_lstat(filename, &stat); + + if (!error) + error = cp_compat_stat64(&stat, statbuf); + return error; +} + +COMPAT_SYSCALL_DEFINE2(fstat64, unsigned int, fd, + struct compat_stat64 __user *, statbuf) +{ + struct kstat stat; + int error = vfs_fstat(fd, &stat); + + if (!error) + error = cp_compat_stat64(&stat, statbuf); + return error; +} + +COMPAT_SYSCALL_DEFINE4(fstatat64, unsigned int, dfd, + const char __user *, filename, + struct compat_stat64 __user *, statbuf, int, flag) +{ + struct kstat stat; + int error; + + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_compat_stat64(&stat, statbuf); +} + +COMPAT_SYSCALL_DEFINE3(sparc_sigaction, int, sig, + struct compat_old_sigaction __user *,act, + struct compat_old_sigaction __user *,oact) +{ + WARN_ON_ONCE(sig >= 0); + return compat_sys_sigaction(-sig, act, oact); +} + +COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig, + struct compat_sigaction __user *,act, + struct compat_sigaction __user *,oact, + void __user *,restorer, + 
compat_size_t,sigsetsize) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(compat_sigset_t)) + return -EINVAL; + + if (act) { + u32 u_handler, u_restorer; + + new_ka.ka_restorer = restorer; + ret = get_user(u_handler, &act->sa_handler); + new_ka.sa.sa_handler = compat_ptr(u_handler); + ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); + ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); + ret |= get_user(u_restorer, &act->sa_restorer); + new_ka.sa.sa_restorer = compat_ptr(u_restorer); + if (ret) + return -EFAULT; + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); + ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, + sizeof(oact->sa_mask)); + ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); + ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); + if (ret) + ret = -EFAULT; + } + + return ret; +} + +COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, ubuf, + compat_size_t, count, u32, poshi, u32, poslo) +{ + return ksys_pread64(fd, ubuf, count, ((u64)poshi << 32) | poslo); +} + +COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, char __user *, ubuf, + compat_size_t, count, u32, poshi, u32, poslo) +{ + return ksys_pwrite64(fd, ubuf, count, ((u64)poshi << 32) | poslo); +} + +COMPAT_SYSCALL_DEFINE4(readahead, int, fd, u32, offhi, u32, offlo, + compat_size_t, count) +{ + return ksys_readahead(fd, ((u64)offhi << 32) | offlo, count); +} + +COMPAT_SYSCALL_DEFINE5(fadvise64, int, fd, u32, offhi, u32, offlo, + compat_size_t, len, int, advice) +{ + return ksys_fadvise64_64(fd, ((u64)offhi << 32) | offlo, len, advice); +} + +COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, u32, offhi, u32, offlo, + u32, lenhi, u32, lenlo, int, advice) +{ + return ksys_fadvise64_64(fd, + ((u64)offhi << 32) | offlo, + ((u64)lenhi << 32) | lenlo, + advice); +} + +COMPAT_SYSCALL_DEFINE6(sync_file_range, unsigned int, fd, u32, off_high, u32, off_low, + u32, nb_high, u32, nb_low, unsigned int, flags) +{ + return ksys_sync_file_range(fd, + ((u64)off_high << 32) | off_low, + ((u64)nb_high << 32) | nb_low, + flags); +} + +COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, u32, offhi, u32, offlo, + u32, lenhi, u32, lenlo) +{ + return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, + ((loff_t)lenhi << 32) | lenlo); +} diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c new file mode 100644 index 000000000..082a55189 --- /dev/null +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* linux/arch/sparc/kernel/sys_sparc.c + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/sparc + * platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "systbls.h" + +/* #define DEBUG_UNIMP_SYSCALL */ + +/* XXX Make this per-binary type, this way we can detect the type of + * XXX a binary. Every Sparc executable calls this very early on. + */ +SYSCALL_DEFINE0(getpagesize) +{ + return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? 
*/ +} + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + /* See asm-sparc/uaccess.h */ + if (len > TASK_SIZE - PAGE_SIZE) + return -ENOMEM; + if (!addr) + addr = TASK_UNMAPPED_BASE; + + info.flags = 0; + info.length = len; + info.low_limit = addr; + info.high_limit = TASK_SIZE; + info.align_mask = (flags & MAP_SHARED) ? + (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); +} + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way unix traditionally does this, though. + */ +SYSCALL_DEFINE0(sparc_pipe) +{ + int fd[2]; + int error; + + error = do_pipe_flags(fd, 0); + if (error) + goto out; + current_pt_regs()->u_regs[UREG_I1] = fd[1]; + error = fd[0]; +out: + return error; +} + +int sparc_mmap_check(unsigned long addr, unsigned long len) +{ + /* See asm-sparc/uaccess.h */ + if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE) + return -EINVAL; + + return 0; +} + +/* Linux version of mmap */ + +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, pgoff) +{ + /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE + we have. */ + return ksys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); +} + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + /* no alignment check? */ + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); +} + +SYSCALL_DEFINE5(sparc_remap_file_pages, unsigned long, start, unsigned long, size, + unsigned long, prot, unsigned long, pgoff, + unsigned long, flags) +{ + /* This works on an existing mmap so we don't need to validate + * the range as that was done at the original mmap call. 
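+	 * As with sys_mmap2 above, the offset argument is in fixed 4K
+	 * units and is rescaled to native pages:
+	 *
+	 *	native_pgoff = pgoff >> (PAGE_SHIFT - 12);
+	 *
+	 * With 4K pages the shift is zero and the value passes through;
+	 * a hypothetical 8K-page build would shift right by one.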
+ */ + return sys_remap_file_pages(start, size, prot, + (pgoff >> (PAGE_SHIFT - 12)), flags); +} + +SYSCALL_DEFINE0(nis_syscall) +{ + static int count = 0; + struct pt_regs *regs = current_pt_regs(); + + if (count++ > 5) + return -ENOSYS; + printk ("%s[%d]: Unimplemented SPARC system call %d\n", + current->comm, task_pid_nr(current), (int)regs->u_regs[1]); +#ifdef DEBUG_UNIMP_SYSCALL + show_regs (regs); +#endif + return -ENOSYS; +} + +/* #define DEBUG_SPARC_BREAKPOINT */ + +asmlinkage void +sparc_breakpoint (struct pt_regs *regs) +{ + +#ifdef DEBUG_SPARC_BREAKPOINT + printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); +#endif + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + +#ifdef DEBUG_SPARC_BREAKPOINT + printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); +#endif +} + +SYSCALL_DEFINE3(sparc_sigaction, int, sig, + struct old_sigaction __user *,act, + struct old_sigaction __user *,oact) +{ + WARN_ON_ONCE(sig >= 0); + return sys_sigaction(-sig, act, oact); +} + +SYSCALL_DEFINE5(rt_sigaction, int, sig, + const struct sigaction __user *, act, + struct sigaction __user *, oact, + void __user *, restorer, + size_t, sigsetsize) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (act) { + new_ka.ka_restorer = restorer; + if (copy_from_user(&new_ka.sa, act, sizeof(*act))) + return -EFAULT; + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) + return -EFAULT; + } + + return ret; +} + +SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len) +{ + int nlen, err; + char tmp[__NEW_UTS_LEN + 1]; + + if (len < 0) + return -EINVAL; + + down_read(&uts_sem); + + nlen = strlen(utsname()->domainname) + 1; + err = -EINVAL; + if (nlen > len) + goto out_unlock; + memcpy(tmp, utsname()->domainname, nlen); + + up_read(&uts_sem); + + if (copy_to_user(name, tmp, nlen)) + return -EFAULT; + return 0; + +out_unlock: + up_read(&uts_sem); + return err; +} diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c new file mode 100644 index 000000000..1e9a9e016 --- /dev/null +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0 +/* linux/arch/sparc64/kernel/sys_sparc.c + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/sparc + * platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "entry.h" +#include "kernel.h" +#include "systbls.h" + +/* #define DEBUG_UNIMP_SYSCALL */ + +SYSCALL_DEFINE0(getpagesize) +{ + return PAGE_SIZE; +} + +/* Does addr --> addr+len fall within 4GB of the VA-space hole or + * overflow past the end of the 64-bit address space? 
+ */ +static inline int invalid_64bit_range(unsigned long addr, unsigned long len) +{ + unsigned long va_exclude_start, va_exclude_end; + + va_exclude_start = VA_EXCLUDE_START; + va_exclude_end = VA_EXCLUDE_END; + + if (unlikely(len >= va_exclude_start)) + return 1; + + if (unlikely((addr + len) < addr)) + return 1; + + if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || + ((addr + len) >= va_exclude_start && + (addr + len) < va_exclude_end))) + return 1; + + return 0; +} + +/* These functions differ from the default implementations in + * mm/mmap.c in two ways: + * + * 1) For file backed MAP_SHARED mmap()'s we D-cache color align, + * for fixed such mappings we just validate what the user gave us. + * 2) For 64-bit tasks we avoid mapping anything within 4GB of + * the spitfire/niagara VA-hole. + */ + +static inline unsigned long COLOR_ALIGN(unsigned long addr, + unsigned long pgoff) +{ + unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); + unsigned long off = (pgoff<mm; + struct vm_area_struct * vma; + unsigned long task_size = TASK_SIZE; + int do_color_align; + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; + if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) + return -ENOMEM; + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + + if (addr) { + if (do_color_align) + addr = COLOR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + addr = vm_unmapped_area(&info); + + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; + info.high_limit = task_size; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long task_size = STACK_TOP32; + unsigned long addr = addr0; + int do_color_align; + struct vm_unmapped_area_info info; + + /* This should only ever run for 32-bit processes. */ + BUG_ON(!test_thread_flag(TIF_32BIT)); + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
+ */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + if (unlikely(len > task_size)) + return -ENOMEM; + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + + /* requesting a specific address */ + if (addr) { + if (do_color_align) + addr = COLOR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +/* Try to align mapping such that we align it as much as possible. */ +unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) +{ + unsigned long align_goal, addr = -ENOMEM; + unsigned long (*get_area)(struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); + + get_area = current->mm->get_unmapped_area; + + if (flags & MAP_FIXED) { + /* Ok, don't mess with it. */ + return get_area(NULL, orig_addr, len, pgoff, flags); + } + flags &= ~MAP_SHARED; + + align_goal = PAGE_SIZE; + if (len >= (4UL * 1024 * 1024)) + align_goal = (4UL * 1024 * 1024); + else if (len >= (512UL * 1024)) + align_goal = (512UL * 1024); + else if (len >= (64UL * 1024)) + align_goal = (64UL * 1024); + + do { + addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); + if (!(addr & ~PAGE_MASK)) { + addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); + break; + } + + if (align_goal == (4UL * 1024 * 1024)) + align_goal = (512UL * 1024); + else if (align_goal == (512UL * 1024)) + align_goal = (64UL * 1024); + else + align_goal = PAGE_SIZE; + } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE); + + /* Mapping is smaller than 64K or larger areas could not + * be obtained. + */ + if (addr & ~PAGE_MASK) + addr = get_area(NULL, orig_addr, len, pgoff, flags); + + return addr; +} +EXPORT_SYMBOL(get_fb_unmapped_area); + +/* Essentially the same as PowerPC. 
*/ +static unsigned long mmap_rnd(void) +{ + unsigned long rnd = 0UL; + + if (current->flags & PF_RANDOMIZE) { + unsigned long val = get_random_long(); + if (test_thread_flag(TIF_32BIT)) + rnd = (val % (1UL << (23UL-PAGE_SHIFT))); + else + rnd = (val % (1UL << (30UL-PAGE_SHIFT))); + } + return rnd << PAGE_SHIFT; +} + +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = mmap_rnd(); + unsigned long gap; + + /* + * Fall back to the standard layout if the personality + * bit is set, or if the expected stack growth is unlimited: + */ + gap = rlim_stack->rlim_cur; + if (!test_thread_flag(TIF_32BIT) || + (current->personality & ADDR_COMPAT_LAYOUT) || + gap == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + /* We know it's 32-bit */ + unsigned long task_size = STACK_TOP32; + + if (gap < 128 * 1024 * 1024) + gap = 128 * 1024 * 1024; + if (gap > (task_size / 6 * 5)) + gap = (task_size / 6 * 5); + + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } +} + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way unix traditionally does this, though. + */ +SYSCALL_DEFINE0(sparc_pipe) +{ + int fd[2]; + int error; + + error = do_pipe_flags(fd, 0); + if (error) + goto out; + current_pt_regs()->u_regs[UREG_I1] = fd[1]; + error = fd[0]; +out: + return error; +} + +/* + * sys_ipc() is the de-multiplexer for the SysV IPC calls.. + * + * This is really horribly ugly. + */ + +SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second, + unsigned long, third, void __user *, ptr, long, fifth) +{ + long err; + + if (!IS_ENABLED(CONFIG_SYSVIPC)) + return -ENOSYS; + + /* No need for backward compatibility. We can start fresh... 
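+	 * The call number picks the subsystem: values up to SEMTIMEDOP
+	 * are semaphore ops, up to MSGCTL message queues, up to SHMCTL
+	 * shared memory. An illustrative invocation:
+	 *
+	 *	sparc_ipc(SHMGET, key, size, shmflg, NULL, 0)
+	 *
+	 * routes to ksys_shmget(key, size, shmflg) below.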
*/ + if (call <= SEMTIMEDOP) { + switch (call) { + case SEMOP: + err = ksys_semtimedop(first, ptr, + (unsigned int)second, NULL); + goto out; + case SEMTIMEDOP: + err = ksys_semtimedop(first, ptr, (unsigned int)second, + (const struct __kernel_timespec __user *) + (unsigned long) fifth); + goto out; + case SEMGET: + err = ksys_semget(first, (int)second, (int)third); + goto out; + case SEMCTL: { + err = ksys_old_semctl(first, second, + (int)third | IPC_64, + (unsigned long) ptr); + goto out; + } + default: + err = -ENOSYS; + goto out; + } + } + if (call <= MSGCTL) { + switch (call) { + case MSGSND: + err = ksys_msgsnd(first, ptr, (size_t)second, + (int)third); + goto out; + case MSGRCV: + err = ksys_msgrcv(first, ptr, (size_t)second, fifth, + (int)third); + goto out; + case MSGGET: + err = ksys_msgget((key_t)first, (int)second); + goto out; + case MSGCTL: + err = ksys_old_msgctl(first, (int)second | IPC_64, ptr); + goto out; + default: + err = -ENOSYS; + goto out; + } + } + if (call <= SHMCTL) { + switch (call) { + case SHMAT: { + ulong raddr; + err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA); + if (!err) { + if (put_user(raddr, + (ulong __user *) third)) + err = -EFAULT; + } + goto out; + } + case SHMDT: + err = ksys_shmdt(ptr); + goto out; + case SHMGET: + err = ksys_shmget(first, (size_t)second, (int)third); + goto out; + case SHMCTL: + err = ksys_old_shmctl(first, (int)second | IPC_64, ptr); + goto out; + default: + err = -ENOSYS; + goto out; + } + } else { + err = -ENOSYS; + } +out: + return err; +} + +SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) +{ + long ret; + + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; + ret = sys_personality(personality); + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; + + return ret; +} + +int sparc_mmap_check(unsigned long addr, unsigned long len) +{ + if (test_thread_flag(TIF_32BIT)) { + if (len >= STACK_TOP32) + return -EINVAL; + + if (addr > STACK_TOP32 - len) + return -EINVAL; + } else { + if (len >= VA_EXCLUDE_START) + return -EINVAL; + + if (invalid_64bit_range(addr, len)) + return -EINVAL; + } + + return 0; +} + +/* Linux version of mmap */ +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + unsigned long retval = -EINVAL; + + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); +out: + return retval; +} + +SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) +{ + if (invalid_64bit_range(addr, len)) + return -EINVAL; + + return vm_munmap(addr, len); +} + +SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) +{ + if (test_thread_flag(TIF_32BIT)) + return -EINVAL; + return sys_mremap(addr, old_len, new_len, flags, new_addr); +} + +SYSCALL_DEFINE0(nis_syscall) +{ + static int count; + struct pt_regs *regs = current_pt_regs(); + + /* Don't make the system unusable, if someone goes stuck */ + if (count++ > 5) + return -ENOSYS; + + printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]); +#ifdef DEBUG_UNIMP_SYSCALL + show_regs (regs); +#endif + + return -ENOSYS; +} + +/* #define DEBUG_SPARC_BREAKPOINT */ + +asmlinkage void sparc_breakpoint(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + if 
(test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } +#ifdef DEBUG_SPARC_BREAKPOINT + printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc); +#endif + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc); +#ifdef DEBUG_SPARC_BREAKPOINT + printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc); +#endif + exception_exit(prev_state); +} + +SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len) +{ + int nlen, err; + char tmp[__NEW_UTS_LEN + 1]; + + if (len < 0) + return -EINVAL; + + down_read(&uts_sem); + + nlen = strlen(utsname()->domainname) + 1; + err = -EINVAL; + if (nlen > len) + goto out_unlock; + memcpy(tmp, utsname()->domainname, nlen); + + up_read(&uts_sem); + + if (copy_to_user(name, tmp, nlen)) + return -EFAULT; + return 0; + +out_unlock: + up_read(&uts_sem); + return err; +} + +SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p) +{ + struct __kernel_timex txc; + struct __kernel_old_timeval *tv = (void *)&txc.time; + int ret; + + /* Copy the user data space into the kernel copy + * structure. But bear in mind that the structures + * may change + */ + if (copy_from_user(&txc, txc_p, sizeof(txc))) + return -EFAULT; + + /* + * override for sparc64 specific timeval type: tv_usec + * is 32 bit wide instead of 64-bit in __kernel_timex + */ + txc.time.tv_usec = tv->tv_usec; + ret = do_adjtimex(&txc); + tv->tv_usec = txc.time.tv_usec; + + return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret; +} + +SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock, + struct __kernel_timex __user *, txc_p) +{ + struct __kernel_timex txc; + struct __kernel_old_timeval *tv = (void *)&txc.time; + int ret; + + if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) { + pr_err_once("process %d (%s) attempted a POSIX timer syscall " + "while CONFIG_POSIX_TIMERS is not set\n", + current->pid, current->comm); + + return -ENOSYS; + } + + /* Copy the user data space into the kernel copy + * structure. But bear in mind that the structures + * may change + */ + if (copy_from_user(&txc, txc_p, sizeof(txc))) + return -EFAULT; + + /* + * override for sparc64 specific timeval type: tv_usec + * is 32 bit wide instead of 64-bit in __kernel_timex + */ + txc.time.tv_usec = tv->tv_usec; + ret = do_clock_adjtime(which_clock, &txc); + tv->tv_usec = txc.time.tv_usec; + + return copy_to_user(txc_p, &txc, sizeof(txc)) ? 
-EFAULT : ret; +} + +SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type, + utrap_handler_t, new_p, utrap_handler_t, new_d, + utrap_handler_t __user *, old_p, + utrap_handler_t __user *, old_d) +{ + if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31) + return -EINVAL; + if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) { + if (old_p) { + if (!current_thread_info()->utraps) { + if (put_user(NULL, old_p)) + return -EFAULT; + } else { + if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p)) + return -EFAULT; + } + } + if (old_d) { + if (put_user(NULL, old_d)) + return -EFAULT; + } + return 0; + } + if (!current_thread_info()->utraps) { + current_thread_info()->utraps = + kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long), + GFP_KERNEL); + if (!current_thread_info()->utraps) + return -ENOMEM; + current_thread_info()->utraps[0] = 1; + } else { + if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && + current_thread_info()->utraps[0] > 1) { + unsigned long *p = current_thread_info()->utraps; + + current_thread_info()->utraps = + kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1, + sizeof(long), + GFP_KERNEL); + if (!current_thread_info()->utraps) { + current_thread_info()->utraps = p; + return -ENOMEM; + } + p[0]--; + current_thread_info()->utraps[0] = 1; + memcpy(current_thread_info()->utraps+1, p+1, + UT_TRAP_INSTRUCTION_31*sizeof(long)); + } + } + if (old_p) { + if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p)) + return -EFAULT; + } + if (old_d) { + if (put_user(NULL, old_d)) + return -EFAULT; + } + current_thread_info()->utraps[type] = (long)new_p; + + return 0; +} + +SYSCALL_DEFINE1(memory_ordering, unsigned long, model) +{ + struct pt_regs *regs = current_pt_regs(); + if (model >= 3) + return -EINVAL; + regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14); + return 0; +} + +SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, + struct sigaction __user *, oact, void __user *, restorer, + size_t, sigsetsize) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (act) { + new_ka.ka_restorer = restorer; + if (copy_from_user(&new_ka.sa, act, sizeof(*act))) + return -EFAULT; + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) + return -EFAULT; + } + + return ret; +} + +SYSCALL_DEFINE0(kern_features) +{ + return KERN_FEATURE_MIXED_MODE_STACK; +} diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S new file mode 100644 index 000000000..0e8ab0602 --- /dev/null +++ b/arch/sparc/kernel/syscalls.S @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + /* SunOS's execv() call only specifies the argv argument, the + * environment settings are the same as the calling processes. 
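+ *
+ * The stubs below tail-call the C implementation, with flushw in the
+ * delay slot so every register window is spilled to the stack before
+ * the exec replaces this address space. Schematically:
+ *
+ *	set	sys_execve, %g1
+ *	jmpl	%g1, %g0	! tail call, does not return here
+ *	 flushw			! runs in the branch delay slot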
+ */ +sys64_execve: + set sys_execve, %g1 + jmpl %g1, %g0 + flushw + +sys64_execveat: + set sys_execveat, %g1 + jmpl %g1, %g0 + flushw + +#ifdef CONFIG_COMPAT +sunos_execv: + mov %g0, %o2 +sys32_execve: + set compat_sys_execve, %g1 + jmpl %g1, %g0 + flushw + +sys32_execveat: + set compat_sys_execveat, %g1 + jmpl %g1, %g0 + flushw +#endif + + .align 32 +#ifdef CONFIG_COMPAT +sys32_sigstack: + ba,pt %xcc, do_sys32_sigstack + mov %i6, %o2 +#endif + .align 32 +#ifdef CONFIG_COMPAT +sys32_sigreturn: + add %sp, PTREGS_OFF, %o0 + call do_sigreturn32 + add %o7, 1f-.-4, %o7 + nop +#endif +sys_rt_sigreturn: + add %sp, PTREGS_OFF, %o0 + call do_rt_sigreturn + add %o7, 1f-.-4, %o7 + nop +#ifdef CONFIG_COMPAT +sys32_rt_sigreturn: + add %sp, PTREGS_OFF, %o0 + call do_rt_sigreturn32 + add %o7, 1f-.-4, %o7 + nop +#endif + .align 32 +1: ldx [%g6 + TI_FLAGS], %l5 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + be,pt %icc, rtrap + nop + call syscall_trace_leave + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + + /* This is how fork() was meant to be done, 8 instruction entry. + * + * I questioned the following code briefly, let me clear things + * up so you must not reason on it like I did. + * + * Know the fork_kpsr etc. we use in the sparc32 port? We don't + * need it here because the only piece of window state we copy to + * the child is the CWP register. Even if the parent sleeps, + * we are safe because we stuck it into pt_regs of the parent + * so it will not change. + * + * XXX This raises the question, whether we can do the same on + * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The + * XXX answer is yes. We stick fork_kpsr in UREG_G0 and + * XXX fork_kwim in UREG_G1 (global registers are considered + * XXX volatile across a system call in the sparc ABI I think + * XXX if it isn't we can use regs->y instead, anyone who depends + * XXX upon the Y register being preserved across a fork deserves + * XXX to lose). + * + * In fact we should take advantage of that fact for other things + * during system calls... + */ + .align 32 +sys_vfork: + flushw + ba,pt %xcc, sparc_vfork + add %sp, PTREGS_OFF, %o0 + + .align 32 +sys_fork: + flushw + ba,pt %xcc, sparc_fork + add %sp, PTREGS_OFF, %o0 + + .align 32 +sys_clone: + flushw + ba,pt %xcc, sparc_clone + add %sp, PTREGS_OFF, %o0 + + .globl ret_from_fork +ret_from_fork: + /* Clear current_thread_info()->new_child. 
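+ * A zero PT_V9_I0 below marks a kernel thread: instead of returning
+ * straight through ret_sys_call, we call the function stashed in
+ * PT_V9_G1 with the argument from PT_V9_G2, then return 0.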
*/ + stb %g0, [%g6 + TI_NEW_CHILD] + call schedule_tail + mov %g7, %o0 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 + brnz,pt %o0, ret_sys_call + ldx [%g6 + TI_FLAGS], %l0 + ldx [%sp + PTREGS_OFF + PT_V9_G1], %l1 + call %l1 + ldx [%sp + PTREGS_OFF + PT_V9_G2], %o0 + ba,pt %xcc, ret_sys_call + mov 0, %o0 + + .globl sparc_exit_group + .type sparc_exit_group,#function +sparc_exit_group: + sethi %hi(sys_exit_group), %g7 + ba,pt %xcc, 1f + or %g7, %lo(sys_exit_group), %g7 + .size sparc_exit_group,.-sparc_exit_group + + .globl sparc_exit + .type sparc_exit,#function +sparc_exit: + sethi %hi(sys_exit), %g7 + or %g7, %lo(sys_exit), %g7 +1: rdpr %pstate, %g2 + wrpr %g2, PSTATE_IE, %pstate + rdpr %otherwin, %g1 + rdpr %cansave, %g3 + add %g3, %g1, %g3 + wrpr %g3, 0x0, %cansave + wrpr %g0, 0x0, %otherwin + wrpr %g2, 0x0, %pstate + jmpl %g7, %g0 + stb %g0, [%g6 + TI_WSAVED] + .size sparc_exit,.-sparc_exit + +linux_sparc_ni_syscall: + sethi %hi(sys_ni_syscall), %l7 + ba,pt %xcc, 4f + or %l7, %lo(sys_ni_syscall), %l7 + +linux_syscall_trace32: + call syscall_trace_enter + add %sp, PTREGS_OFF, %o0 + brnz,pn %o0, 3f + mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 + srl %i0, 0, %o0 + lduw [%l7 + %l4], %l7 + srl %i4, 0, %o4 + srl %i1, 0, %o1 + srl %i2, 0, %o2 + ba,pt %xcc, 5f + srl %i3, 0, %o3 + +linux_syscall_trace: + call syscall_trace_enter + add %sp, PTREGS_OFF, %o0 + brnz,pn %o0, 3f + mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 + mov %i0, %o0 + lduw [%l7 + %l4], %l7 + mov %i1, %o1 + mov %i2, %o2 + mov %i3, %o3 + b,pt %xcc, 2f + mov %i4, %o4 + + + /* Linux 32-bit system calls enter here... */ + .align 32 + .globl linux_sparc_syscall32 +linux_sparc_syscall32: + /* Direct access to user regs, much faster. */ + cmp %g1, NR_syscalls ! IEU1 Group + bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI + srl %i0, 0, %o0 ! IEU0 + sll %g1, 2, %l4 ! IEU0 Group + srl %i4, 0, %o4 ! IEU1 + lduw [%l7 + %l4], %l7 ! Load + srl %i1, 0, %o1 ! IEU0 Group + ldx [%g6 + TI_FLAGS], %l0 ! Load + + srl %i3, 0, %o3 ! IEU0 + srl %i2, 0, %o2 ! IEU0 Group + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + bne,pn %icc, linux_syscall_trace32 ! CTI + mov %i0, %l5 ! IEU1 +5: call %l7 ! CTI Group brk forced + srl %i5, 0, %o5 ! IEU1 + ba,pt %xcc, 3f + sra %o0, 0, %o0 + + /* Linux native system calls enter here... */ + .align 32 + .globl linux_sparc_syscall +linux_sparc_syscall: + /* Direct access to user regs, much faster. */ + cmp %g1, NR_syscalls ! IEU1 Group + bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI + mov %i0, %o0 ! IEU0 + sll %g1, 2, %l4 ! IEU0 Group + mov %i1, %o1 ! 
IEU1 + lduw [%l7 + %l4], %l7 ! Load +4: mov %i2, %o2 ! IEU0 Group + ldx [%g6 + TI_FLAGS], %l0 ! Load + + mov %i3, %o3 ! IEU1 + mov %i4, %o4 ! IEU0 Group + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + bne,pn %icc, linux_syscall_trace ! CTI Group + mov %i0, %l5 ! IEU0 +2: call %l7 ! CTI Group brk forced + mov %i5, %o5 ! IEU0 + nop + +3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] +ret_sys_call: + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 + mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 + sllx %g2, 32, %g2 + + cmp %o0, -ERESTART_RESTARTBLOCK + bgeu,pn %xcc, 1f + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + +2: + /* System call success, clear Carry condition code. */ + andn %g3, %g2, %g3 +3: + stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] + bne,pn %icc, linux_syscall_trace2 + add %l1, 0x4, %l2 ! npc = npc+4 + stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, rtrap + stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] + +1: + /* Check if force_successful_syscall_return() + * was invoked. + */ + ldub [%g6 + TI_SYS_NOERROR], %l2 + brnz,pn %l2, 2b + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + /* System call failure, set Carry condition code. + * Also, get abs(errno) to return to the process. + */ + sub %g0, %o0, %o0 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] + ba,pt %xcc, 3b + or %g3, %g2, %g3 + +linux_syscall_trace2: + call syscall_trace_leave + add %sp, PTREGS_OFF, %o0 + stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, rtrap + stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile new file mode 100644 index 000000000..8440c16df --- /dev/null +++ b/arch/sparc/kernel/syscalls/Makefile @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +$(shell mkdir -p $(uapi) $(kapi)) + +syscall := $(src)/syscall.tbl +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@ + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@ + +$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE + $(call if_changed,syshdr) + +$(kapi)/syscall_table_%.h: $(syscall) $(systbl) FORCE + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h unistd_64.h +kapisyshdr-y += syscall_table_32.h \ + syscall_table_64.h + +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) + +PHONY += all +all: $(uapisyshdr-y) $(kapisyshdr-y) + @: diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000..4398cc6fb --- /dev/null +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -0,0 +1,498 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sparc +# +# The format is: +# +# +# The can be common, 64, or 32 for this file. 
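+# Each row gives: number, abi, name, native entry point and, where it
+# differs, the compat entry point used by 32-bit tasks on a 64-bit
+# kernel.  For example, "7 common wait4 sys_wait4 compat_sys_wait4"
+# dispatches 64-bit callers to sys_wait4 and 32-bit callers to
+# compat_sys_wait4; paired "32"/"64" rows select per-ABI entries for
+# the same number.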
+# +0 common restart_syscall sys_restart_syscall +1 32 exit sys_exit sparc_exit +1 64 exit sparc_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open compat_sys_open +6 common close sys_close +7 common wait4 sys_wait4 compat_sys_wait4 +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 32 execv sunos_execv +11 64 execv sys_nis_syscall +12 common chdir sys_chdir +13 32 chown sys_chown16 +13 64 chown sys_chown +14 common mknod sys_mknod +15 common chmod sys_chmod +16 32 lchown sys_lchown16 +16 64 lchown sys_lchown +17 common brk sys_brk +18 common perfctr sys_nis_syscall +19 common lseek sys_lseek compat_sys_lseek +20 common getpid sys_getpid +21 common capget sys_capget +22 common capset sys_capset +23 32 setuid sys_setuid16 +23 64 setuid sys_setuid +24 32 getuid sys_getuid16 +24 64 getuid sys_getuid +25 common vmsplice sys_vmsplice +26 common ptrace sys_ptrace compat_sys_ptrace +27 common alarm sys_alarm +28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +29 32 pause sys_pause +29 64 pause sys_nis_syscall +30 32 utime sys_utime32 +30 64 utime sys_utime +31 32 lchown32 sys_lchown +32 32 fchown32 sys_fchown +33 common access sys_access +34 common nice sys_nice +35 32 chown32 sys_chown +36 common sync sys_sync +37 common kill sys_kill +38 common stat sys_newstat compat_sys_newstat +39 32 sendfile sys_sendfile compat_sys_sendfile +39 64 sendfile sys_sendfile64 +40 common lstat sys_newlstat compat_sys_newlstat +41 common dup sys_dup +42 common pipe sys_sparc_pipe +43 common times sys_times compat_sys_times +44 32 getuid32 sys_getuid +45 common umount2 sys_umount +46 32 setgid sys_setgid16 +46 64 setgid sys_setgid +47 32 getgid sys_getgid16 +47 64 getgid sys_getgid +48 common signal sys_signal +49 32 geteuid sys_geteuid16 +49 64 geteuid sys_geteuid +50 32 getegid sys_getegid16 +50 64 getegid sys_getegid +51 common acct sys_acct +52 64 memory_ordering sys_memory_ordering +53 32 getgid32 sys_getgid +54 common ioctl sys_ioctl compat_sys_ioctl +55 common reboot sys_reboot +56 32 mmap2 sys_mmap2 sys32_mmap2 +57 common symlink sys_symlink +58 common readlink sys_readlink +59 32 execve sys_execve sys32_execve +59 64 execve sys64_execve +60 common umask sys_umask +61 common chroot sys_chroot +62 common fstat sys_newfstat compat_sys_newfstat +63 common fstat64 sys_fstat64 compat_sys_fstat64 +64 common getpagesize sys_getpagesize +65 common msync sys_msync +66 common vfork sys_vfork +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 32 geteuid32 sys_geteuid +70 32 getegid32 sys_getegid +71 common mmap sys_mmap +72 32 setreuid32 sys_setreuid +73 32 munmap sys_munmap +73 64 munmap sys_64_munmap +74 common mprotect sys_mprotect +75 common madvise sys_madvise +76 common vhangup sys_vhangup +77 32 truncate64 sys_truncate64 compat_sys_truncate64 +78 common mincore sys_mincore +79 32 getgroups sys_getgroups16 +79 64 getgroups sys_getgroups +80 32 setgroups sys_setgroups16 +80 64 setgroups sys_setgroups +81 common getpgrp sys_getpgrp +82 32 setgroups32 sys_setgroups +83 common setitimer sys_setitimer compat_sys_setitimer +84 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +85 common swapon sys_swapon +86 common getitimer sys_getitimer compat_sys_getitimer +87 32 setuid32 sys_setuid +88 common sethostname sys_sethostname +89 32 setgid32 sys_setgid +90 common dup2 sys_dup2 +91 32 setfsuid32 sys_setfsuid +92 common fcntl sys_fcntl compat_sys_fcntl +93 common select 
sys_select +94 32 setfsgid32 sys_setfsgid +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common getpriority sys_getpriority +101 common rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn +102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +105 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +105 64 rt_sigtimedwait sys_rt_sigtimedwait +106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +108 32 setresuid32 sys_setresuid +108 64 setresuid sys_setresuid +109 32 getresuid32 sys_getresuid +109 64 getresuid sys_getresuid +110 32 setresgid32 sys_setresgid +110 64 setresgid sys_setresgid +111 32 getresgid32 sys_getresgid +111 64 getresgid sys_getresgid +112 32 setregid32 sys_setregid +113 common recvmsg sys_recvmsg compat_sys_recvmsg +114 common sendmsg sys_sendmsg compat_sys_sendmsg +115 32 getgroups32 sys_getgroups +116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday +117 common getrusage sys_getrusage compat_sys_getrusage +118 common getsockopt sys_getsockopt sys_getsockopt +119 common getcwd sys_getcwd +120 common readv sys_readv +121 common writev sys_writev +122 common settimeofday sys_settimeofday compat_sys_settimeofday +123 32 fchown sys_fchown16 +123 64 fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 32 setreuid sys_setreuid16 +126 64 setreuid sys_setreuid +127 32 setregid sys_setregid16 +127 64 setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate compat_sys_truncate +130 common ftruncate sys_ftruncate compat_sys_ftruncate +131 common flock sys_flock +132 common lstat64 sys_lstat64 compat_sys_lstat64 +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +138 32 utimes sys_utimes_time32 +138 64 utimes sys_utimes +139 common stat64 sys_stat64 compat_sys_stat64 +140 common sendfile64 sys_sendfile64 +141 common getpeername sys_getpeername +142 32 futex sys_futex_time32 +142 64 futex sys_futex +143 common gettid sys_gettid +144 common getrlimit sys_getrlimit compat_sys_getrlimit +145 common setrlimit sys_setrlimit compat_sys_setrlimit +146 common pivot_root sys_pivot_root +147 common prctl sys_prctl +148 common pciconfig_read sys_pciconfig_read +149 common pciconfig_write sys_pciconfig_write +150 common getsockname sys_getsockname +151 common inotify_init sys_inotify_init +152 common inotify_add_watch sys_inotify_add_watch +153 common poll sys_poll +154 common getdents64 sys_getdents64 +155 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +156 common inotify_rm_watch sys_inotify_rm_watch +157 common statfs sys_statfs compat_sys_statfs +158 common fstatfs sys_fstatfs compat_sys_fstatfs +159 common umount sys_oldumount +160 common sched_set_affinity sys_sched_setaffinity compat_sys_sched_setaffinity +161 common sched_get_affinity sys_sched_getaffinity compat_sys_sched_getaffinity +162 common getdomainname sys_getdomainname +163 common setdomainname sys_setdomainname +164 64 utrap_install sys_utrap_install +165 common quotactl sys_quotactl +166 common set_tid_address sys_set_tid_address +167 common mount sys_mount +168 common ustat 
sys_ustat compat_sys_ustat +169 common setxattr sys_setxattr +170 common lsetxattr sys_lsetxattr +171 common fsetxattr sys_fsetxattr +172 common getxattr sys_getxattr +173 common lgetxattr sys_lgetxattr +174 common getdents sys_getdents compat_sys_getdents +175 common setsid sys_setsid +176 common fchdir sys_fchdir +177 common fgetxattr sys_fgetxattr +178 common listxattr sys_listxattr +179 common llistxattr sys_llistxattr +180 common flistxattr sys_flistxattr +181 common removexattr sys_removexattr +182 common lremovexattr sys_lremovexattr +183 32 sigpending sys_sigpending compat_sys_sigpending +183 64 sigpending sys_nis_syscall +184 common query_module sys_ni_syscall +185 common setpgid sys_setpgid +186 common fremovexattr sys_fremovexattr +187 common tkill sys_tkill +188 32 exit_group sys_exit_group sparc_exit_group +188 64 exit_group sparc_exit_group +189 common uname sys_newuname +190 common init_module sys_init_module +191 32 personality sys_personality sys_sparc64_personality +191 64 personality sys_sparc64_personality +192 32 remap_file_pages sys_sparc_remap_file_pages sys_remap_file_pages +192 64 remap_file_pages sys_remap_file_pages +193 common epoll_create sys_epoll_create +194 common epoll_ctl sys_epoll_ctl +195 common epoll_wait sys_epoll_wait +196 common ioprio_set sys_ioprio_set +197 common getppid sys_getppid +198 32 sigaction sys_sparc_sigaction compat_sys_sparc_sigaction +198 64 sigaction sys_nis_syscall +199 common sgetmask sys_sgetmask +200 common ssetmask sys_ssetmask +201 32 sigsuspend sys_sigsuspend +201 64 sigsuspend sys_nis_syscall +202 common oldlstat sys_newlstat compat_sys_newlstat +203 common uselib sys_uselib +204 32 readdir sys_old_readdir compat_sys_old_readdir +204 64 readdir sys_nis_syscall +205 common readahead sys_readahead compat_sys_readahead +206 common socketcall sys_socketcall sys32_socketcall +207 common syslog sys_syslog +208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +209 common fadvise64 sys_fadvise64 compat_sys_fadvise64 +210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +211 common tgkill sys_tgkill +212 common waitpid sys_waitpid +213 common swapoff sys_swapoff +214 common sysinfo sys_sysinfo compat_sys_sysinfo +215 32 ipc sys_ipc compat_sys_ipc +215 64 ipc sys_sparc_ipc +216 32 sigreturn sys_sigreturn sys32_sigreturn +216 64 sigreturn sys_nis_syscall +217 common clone sys_clone +218 common ioprio_get sys_ioprio_get +219 32 adjtimex sys_adjtimex_time32 +219 64 adjtimex sys_sparc_adjtimex +220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask +220 64 sigprocmask sys_nis_syscall +221 common create_module sys_ni_syscall +222 common delete_module sys_delete_module +223 common get_kernel_syms sys_ni_syscall +224 common getpgid sys_getpgid +225 common bdflush sys_ni_syscall +226 common sysfs sys_sysfs +227 common afs_syscall sys_nis_syscall +228 common setfsuid sys_setfsuid16 +229 common setfsgid sys_setfsgid16 +230 common _newselect sys_select compat_sys_select +231 32 time sys_time32 +232 common splice sys_splice +233 32 stime sys_stime32 +233 64 stime sys_stime +234 common statfs64 sys_statfs64 compat_sys_statfs64 +235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +236 common _llseek sys_llseek +237 common mlock sys_mlock +238 common munlock sys_munlock +239 common mlockall sys_mlockall +240 common munlockall sys_munlockall +241 common sched_setparam sys_sched_setparam +242 common sched_getparam sys_sched_getparam +243 common sched_setscheduler sys_sched_setscheduler +244 common sched_getscheduler 
sys_sched_getscheduler +245 common sched_yield sys_sched_yield +246 common sched_get_priority_max sys_sched_get_priority_max +247 common sched_get_priority_min sys_sched_get_priority_min +248 32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +248 64 sched_rr_get_interval sys_sched_rr_get_interval +249 32 nanosleep sys_nanosleep_time32 +249 64 nanosleep sys_nanosleep +250 32 mremap sys_mremap +250 64 mremap sys_64_mremap +251 common _sysctl sys_ni_syscall +252 common getsid sys_getsid +253 common fdatasync sys_fdatasync +254 32 nfsservctl sys_ni_syscall sys_nis_syscall +254 64 nfsservctl sys_nis_syscall +255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range +256 32 clock_settime sys_clock_settime32 +256 64 clock_settime sys_clock_settime +257 32 clock_gettime sys_clock_gettime32 +257 64 clock_gettime sys_clock_gettime +258 32 clock_getres sys_clock_getres_time32 +258 64 clock_getres sys_clock_getres +259 32 clock_nanosleep sys_clock_nanosleep_time32 +259 64 clock_nanosleep sys_clock_nanosleep +260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +262 32 timer_settime sys_timer_settime32 +262 64 timer_settime sys_timer_settime +263 32 timer_gettime sys_timer_gettime32 +263 64 timer_gettime sys_timer_gettime +264 common timer_getoverrun sys_timer_getoverrun +265 common timer_delete sys_timer_delete +266 common timer_create sys_timer_create compat_sys_timer_create +# 267 was vserver +267 common vserver sys_nis_syscall +268 common io_setup sys_io_setup compat_sys_io_setup +269 common io_destroy sys_io_destroy +270 common io_submit sys_io_submit compat_sys_io_submit +271 common io_cancel sys_io_cancel +272 32 io_getevents sys_io_getevents_time32 +272 64 io_getevents sys_io_getevents +273 common mq_open sys_mq_open compat_sys_mq_open +274 common mq_unlink sys_mq_unlink +275 32 mq_timedsend sys_mq_timedsend_time32 +275 64 mq_timedsend sys_mq_timedsend +276 32 mq_timedreceive sys_mq_timedreceive_time32 +276 64 mq_timedreceive sys_mq_timedreceive +277 common mq_notify sys_mq_notify compat_sys_mq_notify +278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +279 common waitid sys_waitid compat_sys_waitid +280 common tee sys_tee +281 common add_key sys_add_key +282 common request_key sys_request_key +283 common keyctl sys_keyctl compat_sys_keyctl +284 common openat sys_openat compat_sys_openat +285 common mkdirat sys_mkdirat +286 common mknodat sys_mknodat +287 common fchownat sys_fchownat +288 32 futimesat sys_futimesat_time32 +288 64 futimesat sys_futimesat +289 common fstatat64 sys_fstatat64 compat_sys_fstatat64 +290 common unlinkat sys_unlinkat +291 common renameat sys_renameat +292 common linkat sys_linkat +293 common symlinkat sys_symlinkat +294 common readlinkat sys_readlinkat +295 common fchmodat sys_fchmodat +296 common faccessat sys_faccessat +297 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +297 64 pselect6 sys_pselect6 +298 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +298 64 ppoll sys_ppoll +299 common unshare sys_unshare +300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +302 common migrate_pages sys_migrate_pages +303 common mbind sys_mbind +304 common get_mempolicy sys_get_mempolicy +305 common set_mempolicy sys_set_mempolicy +306 common kexec_load sys_kexec_load compat_sys_kexec_load +307 common move_pages sys_move_pages 
+308 common getcpu sys_getcpu +309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +310 32 utimensat sys_utimensat_time32 +310 64 utimensat sys_utimensat +311 common signalfd sys_signalfd compat_sys_signalfd +312 common timerfd_create sys_timerfd_create +313 common eventfd sys_eventfd +314 common fallocate sys_fallocate compat_sys_fallocate +315 32 timerfd_settime sys_timerfd_settime32 +315 64 timerfd_settime sys_timerfd_settime +316 32 timerfd_gettime sys_timerfd_gettime32 +316 64 timerfd_gettime sys_timerfd_gettime +317 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +318 common eventfd2 sys_eventfd2 +319 common epoll_create1 sys_epoll_create1 +320 common dup3 sys_dup3 +321 common pipe2 sys_pipe2 +322 common inotify_init1 sys_inotify_init1 +323 common accept4 sys_accept4 +324 common preadv sys_preadv compat_sys_preadv +325 common pwritev sys_pwritev compat_sys_pwritev +326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +327 common perf_event_open sys_perf_event_open +328 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +328 64 recvmmsg sys_recvmmsg +329 common fanotify_init sys_fanotify_init +330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +331 common prlimit64 sys_prlimit64 +332 common name_to_handle_at sys_name_to_handle_at +333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +334 32 clock_adjtime sys_clock_adjtime32 +334 64 clock_adjtime sys_sparc_clock_adjtime +335 common syncfs sys_syncfs +336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +337 common setns sys_setns +338 common process_vm_readv sys_process_vm_readv +339 common process_vm_writev sys_process_vm_writev +340 32 kern_features sys_ni_syscall sys_kern_features +340 64 kern_features sys_kern_features +341 common kcmp sys_kcmp +342 common finit_module sys_finit_module +343 common sched_setattr sys_sched_setattr +344 common sched_getattr sys_sched_getattr +345 common renameat2 sys_renameat2 +346 common seccomp sys_seccomp +347 common getrandom sys_getrandom +348 common memfd_create sys_memfd_create +349 common bpf sys_bpf +350 32 execveat sys_execveat sys32_execveat +350 64 execveat sys64_execveat +351 common membarrier sys_membarrier +352 common userfaultfd sys_userfaultfd +353 common bind sys_bind +354 common listen sys_listen +355 common setsockopt sys_setsockopt sys_setsockopt +356 common mlock2 sys_mlock2 +357 common copy_file_range sys_copy_file_range +358 common preadv2 sys_preadv2 compat_sys_preadv2 +359 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +360 common statx sys_statx +361 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +361 64 io_pgetevents sys_io_pgetevents +362 common pkey_mprotect sys_pkey_mprotect +363 common pkey_alloc sys_pkey_alloc +364 common pkey_free sys_pkey_free +365 common rseq sys_rseq +# room for arch specific syscalls +392 64 semtimedop sys_semtimedop +393 common semget sys_semget +394 common semctl sys_semctl compat_sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl compat_sys_shmctl +397 common shmat sys_shmat compat_sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd compat_sys_msgsnd +401 common msgrcv sys_msgrcv compat_sys_msgrcv +402 common msgctl sys_msgctl compat_sys_msgctl +403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 32 clock_settime64 sys_clock_settime sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres 
sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 32 timer_settime64 sys_timer_settime sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +# 435 reserved for clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c new file mode 100644 index 000000000..f19487e4c --- /dev/null +++ b/arch/sparc/kernel/sysfs.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sysfs.c: Topology sysfs support code for sparc64. + * + * Copyright (C) 2007 David S. 
Miller + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); + +#define SHOW_MMUSTAT_ULONG(NAME) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ + return sprintf(buf, "%lu\n", p->NAME); \ +} \ +static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL) + +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte); + +static struct attribute *mmu_stat_attrs[] = { + &dev_attr_immu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, + 
&dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, + NULL, +}; + +static struct attribute_group mmu_stat_group = { + .attrs = mmu_stat_attrs, + .name = "mmu_stats", +}; + +static long read_mmustat_enable(void *data __maybe_unused) +{ + unsigned long ra = 0; + + sun4v_mmustat_info(&ra); + + return ra != 0; +} + +static long write_mmustat_enable(void *data) +{ + unsigned long ra, orig_ra, *val = data; + + if (*val) + ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); + else + ra = 0UL; + + return sun4v_mmustat_conf(ra, &orig_ra); +} + +static ssize_t show_mmustat_enable(struct device *s, + struct device_attribute *attr, char *buf) +{ + long val = work_on_cpu(s->id, read_mmustat_enable, NULL); + + return sprintf(buf, "%lx\n", val); +} + +static ssize_t store_mmustat_enable(struct device *s, + struct device_attribute *attr, const char *buf, + size_t count) +{ + unsigned long val; + long err; + int ret; + + ret = sscanf(buf, "%lu", &val); + if (ret != 1) + return -EINVAL; + + err = work_on_cpu(s->id, write_mmustat_enable, &val); + if (err) + return -EIO; + + return count; +} + +static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); + +static int mmu_stats_supported; + +static int register_mmu_stats(struct device *s) +{ + if (!mmu_stats_supported) + return 0; + device_create_file(s, &dev_attr_mmustat_enable); + return sysfs_create_group(&s->kobj, &mmu_stat_group); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void unregister_mmu_stats(struct device *s) +{ + if (!mmu_stats_supported) + return; + sysfs_remove_group(&s->kobj, &mmu_stat_group); + device_remove_file(s, &dev_attr_mmustat_enable); +} +#endif + +#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + cpuinfo_sparc *c = &cpu_data(dev->id); \ + return sprintf(buf, "%lu\n", c->MEMBER); \ +} + +#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + cpuinfo_sparc *c = &cpu_data(dev->id); \ + return sprintf(buf, "%u\n", c->MEMBER); \ +} + +SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick); +SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size); +SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size); +SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size); +SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size); +SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size); +SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); + +static struct device_attribute cpu_core_attrs[] = { + __ATTR(clock_tick, 0444, show_clock_tick, NULL), + __ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), + __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), + __ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), + __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), + __ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), + __ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), +}; + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static int register_cpu_online(unsigned int cpu) +{ + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct device *s = &c->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) + device_create_file(s, &cpu_core_attrs[i]); + + register_mmu_stats(s); + return 0; +} + +static int unregister_cpu_online(unsigned int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + struct cpu *c 
= &per_cpu(cpu_devices, cpu); + struct device *s = &c->dev; + int i; + + unregister_mmu_stats(s); + for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) + device_remove_file(s, &cpu_core_attrs[i]); +#endif + return 0; +} + +static void __init check_mmu_stats(void) +{ + unsigned long dummy1, err; + + if (tlb_type != hypervisor) + return; + + err = sun4v_mmustat_info(&dummy1); + if (!err) + mmu_stats_supported = 1; +} + +static int __init topology_init(void) +{ + int cpu, ret; + + check_mmu_stats(); + + for_each_possible_cpu(cpu) { + struct cpu *c = &per_cpu(cpu_devices, cpu); + + register_cpu(c, cpu); + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online", + register_cpu_online, unregister_cpu_online); + WARN_ON(ret < 0); + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h new file mode 100644 index 000000000..bf014267d --- /dev/null +++ b/arch/sparc/kernel/systbls.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SYSTBLS_H +#define _SYSTBLS_H + +#include +#include +#include +#include + +#include + +asmlinkage long sys_getpagesize(void); +asmlinkage long sys_sparc_pipe(void); +asmlinkage long sys_nis_syscall(void); +asmlinkage long sys_getdomainname(char __user *name, int len); +void do_rt_sigreturn(struct pt_regs *regs); +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long off); +asmlinkage void sparc_breakpoint(struct pt_regs *regs); + +#ifdef CONFIG_SPARC32 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long sys_sparc_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); + +#endif /* CONFIG_SPARC32 */ + +#ifdef CONFIG_SPARC64 +asmlinkage long sys_sparc_ipc(unsigned int call, int first, + unsigned long second, + unsigned long third, + void __user *ptr, long fifth); +asmlinkage long sparc64_personality(unsigned long personality); +asmlinkage long sys64_munmap(unsigned long addr, size_t len); +asmlinkage unsigned long sys64_mremap(unsigned long addr, + unsigned long old_len, + unsigned long new_len, + unsigned long flags, + unsigned long new_addr); +asmlinkage long sys_utrap_install(utrap_entry_t type, + utrap_handler_t new_p, + utrap_handler_t new_d, + utrap_handler_t __user *old_p, + utrap_handler_t __user *old_d); +asmlinkage long sys_memory_ordering(unsigned long model); +asmlinkage void sparc64_set_context(struct pt_regs *regs); +asmlinkage void sparc64_get_context(struct pt_regs *regs); +asmlinkage long compat_sys_truncate64(const char __user * path, + u32 high, + u32 low); +asmlinkage long compat_sys_ftruncate64(unsigned int fd, + u32 high, + u32 low); +struct compat_stat64; +asmlinkage long compat_sys_stat64(const char __user * filename, + struct compat_stat64 __user *statbuf); +asmlinkage long compat_sys_lstat64(const char __user * filename, + struct compat_stat64 __user *statbuf); +asmlinkage long compat_sys_fstat64(unsigned int fd, + struct compat_stat64 __user * statbuf); +asmlinkage long compat_sys_fstatat64(unsigned int dfd, + const char __user *filename, + struct compat_stat64 __user * statbuf, int flag); +asmlinkage long compat_sys_pread64(unsigned int fd, + char __user *ubuf, + compat_size_t count, + u32 poshi, + u32 poslo); +asmlinkage long compat_sys_pwrite64(unsigned int fd, + char __user *ubuf, + 
compat_size_t count, + u32 poshi, + u32 poslo); +asmlinkage long compat_sys_readahead(int fd, + unsigned offhi, + unsigned offlo, + compat_size_t count); +long compat_sys_fadvise64(int fd, + unsigned offhi, + unsigned offlo, + compat_size_t len, int advice); +long compat_sys_fadvise64_64(int fd, + unsigned offhi, unsigned offlo, + unsigned lenhi, unsigned lenlo, + int advice); +long compat_sys_sync_file_range(unsigned int fd, + unsigned off_high, unsigned off_low, + unsigned nb_high, unsigned nb_low, + unsigned int flags); +asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, + u32 lenhi, u32 lenlo); +asmlinkage long compat_sys_fstat64(unsigned int fd, + struct compat_stat64 __user * statbuf); +asmlinkage long compat_sys_fstatat64(unsigned int dfd, + const char __user *filename, + struct compat_stat64 __user * statbuf, + int flag); +#endif /* CONFIG_SPARC64 */ +#endif /* _SYSTBLS_H */ diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S new file mode 100644 index 000000000..3aaffa017 --- /dev/null +++ b/arch/sparc/kernel/systbls_32.S @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* systbls.S: System call entry point tables for OS compatibility. + * The native Linux system call table lives here also. + * + * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) + * + * Based upon preliminary work which is: + * + * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) + */ + +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, entry) .long entry + .data + .align 4 + .globl sys_call_table +sys_call_table: +#include /* 32-bit native syscalls */ diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S new file mode 100644 index 000000000..398fe449d --- /dev/null +++ b/arch/sparc/kernel/systbls_64.S @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* systbls.S: System call entry point tables for OS compatibility. + * The native Linux system call table lives here also. + * + * Copyright (C) 1995, 1996, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * + * Based upon preliminary work which is: + * + * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) + */ + +#define __SYSCALL(nr, entry) .word entry + .text + .align 4 +#ifdef CONFIG_COMPAT + .globl sys_call_table32 +sys_call_table32: +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#include /* Compat syscalls */ +#undef __SYSCALL_WITH_COMPAT +#endif /* CONFIG_COMPAT */ + + .align 4 + .globl sys_call_table64, sys_call_table +sys_call_table64: +sys_call_table: +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#include /* 64-bit native syscalls */ diff --git a/arch/sparc/kernel/termios.c b/arch/sparc/kernel/termios.c new file mode 100644 index 000000000..ee64965c2 --- /dev/null +++ b/arch/sparc/kernel/termios.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with OSF/1 (which + * is compatible with sysV)... 
+ */ +#define _VMIN 4 +#define _VTIME 5 + +int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + struct termio v; + memset(&v, 0, sizeof(struct termio)); + v.c_iflag = termios->c_iflag; + v.c_oflag = termios->c_oflag; + v.c_cflag = termios->c_cflag; + v.c_lflag = termios->c_lflag; + v.c_line = termios->c_line; + memcpy(v.c_cc, termios->c_cc, NCC); + if (!(v.c_lflag & ICANON)) { + v.c_cc[_VMIN] = termios->c_cc[VMIN]; + v.c_cc[_VTIME] = termios->c_cc[VTIME]; + } + return copy_to_user(termio, &v, sizeof(struct termio)); +} + +int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + int err; + err = get_user(k->c_iflag, &u->c_iflag); + err |= get_user(k->c_oflag, &u->c_oflag); + err |= get_user(k->c_cflag, &u->c_cflag); + err |= get_user(k->c_lflag, &u->c_lflag); + err |= get_user(k->c_line, &u->c_line); + err |= copy_from_user(k->c_cc, u->c_cc, NCCS); + if (k->c_lflag & ICANON) { + err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]); + err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]); + } else { + err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]); + err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]); + } + err |= get_user(k->c_ispeed, &u->c_ispeed); + err |= get_user(k->c_ospeed, &u->c_ospeed); + return err; +} + +int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + int err; + err = put_user(k->c_iflag, &u->c_iflag); + err |= put_user(k->c_oflag, &u->c_oflag); + err |= put_user(k->c_cflag, &u->c_cflag); + err |= put_user(k->c_lflag, &u->c_lflag); + err |= put_user(k->c_line, &u->c_line); + err |= copy_to_user(u->c_cc, k->c_cc, NCCS); + if (!(k->c_lflag & ICANON)) { + err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]); + err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]); + } else { + err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]); + err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]); + } + err |= put_user(k->c_ispeed, &u->c_ispeed); + err |= put_user(k->c_ospeed, &u->c_ospeed); + return err; +} + +int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + int err; + err = get_user(k->c_iflag, &u->c_iflag); + err |= get_user(k->c_oflag, &u->c_oflag); + err |= get_user(k->c_cflag, &u->c_cflag); + err |= get_user(k->c_lflag, &u->c_lflag); + err |= get_user(k->c_line, &u->c_line); + err |= copy_from_user(k->c_cc, u->c_cc, NCCS); + if (k->c_lflag & ICANON) { + err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]); + err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]); + } else { + err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]); + err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]); + } + return err; +} + +int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + int err; + err = put_user(k->c_iflag, &u->c_iflag); + err |= put_user(k->c_oflag, &u->c_oflag); + err |= put_user(k->c_cflag, &u->c_cflag); + err |= put_user(k->c_lflag, &u->c_lflag); + err |= put_user(k->c_line, &u->c_line); + err |= copy_to_user(u->c_cc, k->c_cc, NCCS); + if (!(k->c_lflag & ICANON)) { + err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]); + err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]); + } else { + err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]); + err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]); + } + return err; +} diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c new file mode 100644 index 000000000..8a08830e4 --- /dev/null +++ b/arch/sparc/kernel/time_32.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0 +/* linux/arch/sparc/kernel/time.c + * + * 
Copyright (C) 1995 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) + * + * Chris Davis (cdavis@cois.on.ca) 03/27/1998 + * Added support for the intersil on the sun4/4200 + * + * Gleb Raiko (rajko@mech.math.msu.su) 08/18/1998 + * Support for MicroSPARC-IIep, PCI CPU. + * + * This file handles the Sparc specific time handling details. + * + * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 + * "A Kernel Model for Precision Timekeeping" by Dave Mills + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel.h" +#include "irq.h" + +static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock); +static __volatile__ u64 timer_cs_internal_counter = 0; +static char timer_cs_enabled = 0; + +static struct clock_event_device timer_ce; +static char timer_ce_enabled = 0; + +#ifdef CONFIG_SMP +DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent); +#endif + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +unsigned long profile_pc(struct pt_regs *regs) +{ + extern char __copy_user_begin[], __copy_user_end[]; + extern char __bzero_begin[], __bzero_end[]; + + unsigned long pc = regs->pc; + + if (in_lock_functions(pc) || + (pc >= (unsigned long) __copy_user_begin && + pc < (unsigned long) __copy_user_end) || + (pc >= (unsigned long) __bzero_begin && + pc < (unsigned long) __bzero_end)) + pc = regs->u_regs[UREG_RETPC]; + return pc; +} + +EXPORT_SYMBOL(profile_pc); + +volatile u32 __iomem *master_l10_counter; + +irqreturn_t notrace timer_interrupt(int dummy, void *dev_id) +{ + if (timer_cs_enabled) { + write_seqlock(&timer_cs_lock); + timer_cs_internal_counter++; + sparc_config.clear_clock_irq(); + write_sequnlock(&timer_cs_lock); + } else { + sparc_config.clear_clock_irq(); + } + + if (timer_ce_enabled) + timer_ce.event_handler(&timer_ce); + + return IRQ_HANDLED; +} + +static int timer_ce_shutdown(struct clock_event_device *evt) +{ + timer_ce_enabled = 0; + smp_mb(); + return 0; +} + +static int timer_ce_set_periodic(struct clock_event_device *evt) +{ + timer_ce_enabled = 1; + smp_mb(); + return 0; +} + +static __init void setup_timer_ce(void) +{ + struct clock_event_device *ce = &timer_ce; + + BUG_ON(smp_processor_id() != boot_cpu_id); + + ce->name = "timer_ce"; + ce->rating = 100; + ce->features = CLOCK_EVT_FEAT_PERIODIC; + ce->set_state_shutdown = timer_ce_shutdown; + ce->set_state_periodic = timer_ce_set_periodic; + ce->tick_resume = timer_ce_set_periodic; + ce->cpumask = cpu_possible_mask; + ce->shift = 32; + ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC, + ce->shift); + clockevents_register_device(ce); +} + +static unsigned int sbus_cycles_offset(void) +{ + u32 val, offset; + + val = sbus_readl(master_l10_counter); + offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK; + + /* Limit hit? 
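+ * If so, the counter wrapped before we sampled it but the pending
+ * timer interrupt has not yet folded that period into
+ * timer_cs_internal_counter, so account one full period here.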
*/ + if (val & TIMER_LIMIT_BIT) + offset += sparc_config.cs_period; + + return offset; +} + +static u64 timer_cs_read(struct clocksource *cs) +{ + unsigned int seq, offset; + u64 cycles; + + do { + seq = read_seqbegin(&timer_cs_lock); + + cycles = timer_cs_internal_counter; + offset = sparc_config.get_cycles_offset(); + } while (read_seqretry(&timer_cs_lock, seq)); + + /* Count absolute cycles */ + cycles *= sparc_config.cs_period; + cycles += offset; + + return cycles; +} + +static struct clocksource timer_cs = { + .name = "timer_cs", + .rating = 100, + .read = timer_cs_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static __init int setup_timer_cs(void) +{ + timer_cs_enabled = 1; + return clocksource_register_hz(&timer_cs, sparc_config.clock_rate); +} + +#ifdef CONFIG_SMP +static int percpu_ce_shutdown(struct clock_event_device *evt) +{ + int cpu = cpumask_first(evt->cpumask); + + sparc_config.load_profile_irq(cpu, 0); + return 0; +} + +static int percpu_ce_set_periodic(struct clock_event_device *evt) +{ + int cpu = cpumask_first(evt->cpumask); + + sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ); + return 0; +} + +static int percpu_ce_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + int cpu = cpumask_first(evt->cpumask); + unsigned int next = (unsigned int)delta; + + sparc_config.load_profile_irq(cpu, next); + return 0; +} + +void register_percpu_ce(int cpu) +{ + struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu); + unsigned int features = CLOCK_EVT_FEAT_PERIODIC; + + if (sparc_config.features & FEAT_L14_ONESHOT) + features |= CLOCK_EVT_FEAT_ONESHOT; + + ce->name = "percpu_ce"; + ce->rating = 200; + ce->features = features; + ce->set_state_shutdown = percpu_ce_shutdown; + ce->set_state_periodic = percpu_ce_set_periodic; + ce->set_state_oneshot = percpu_ce_shutdown; + ce->set_next_event = percpu_ce_set_next_event; + ce->cpumask = cpumask_of(cpu); + ce->shift = 32; + ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC, + ce->shift); + ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce); + ce->max_delta_ticks = (unsigned long)sparc_config.clock_rate; + ce->min_delta_ns = clockevent_delta2ns(100, ce); + ce->min_delta_ticks = 100; + + clockevents_register_device(ce); +} +#endif + +static unsigned char mostek_read_byte(struct device *dev, u32 ofs) +{ + struct platform_device *pdev = to_platform_device(dev); + struct m48t59_plat_data *pdata = pdev->dev.platform_data; + + return readb(pdata->ioaddr + ofs); +} + +static void mostek_write_byte(struct device *dev, u32 ofs, u8 val) +{ + struct platform_device *pdev = to_platform_device(dev); + struct m48t59_plat_data *pdata = pdev->dev.platform_data; + + writeb(val, pdata->ioaddr + ofs); +} + +static struct m48t59_plat_data m48t59_data = { + .read_byte = mostek_read_byte, + .write_byte = mostek_write_byte, +}; + +/* resource is set at runtime */ +static struct platform_device m48t59_rtc = { + .name = "rtc-m48t59", + .id = 0, + .num_resources = 1, + .dev = { + .platform_data = &m48t59_data, + }, +}; + +static int clock_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + const char *model = of_get_property(dp, "model", NULL); + + if (!model) + return -ENODEV; + + /* Only the primary RTC has an address property */ + if (!of_find_property(dp, "address", NULL)) + return -ENODEV; + + m48t59_rtc.resource = &op->resource[0]; + if (!strcmp(model, "mk48t02")) { + /* Map the clock register io area read-only */ + 
m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0, + 2048, "rtc-m48t59"); + m48t59_data.type = M48T59RTC_TYPE_M48T02; + } else if (!strcmp(model, "mk48t08")) { + m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0, + 8192, "rtc-m48t59"); + m48t59_data.type = M48T59RTC_TYPE_M48T08; + } else + return -ENODEV; + + if (platform_device_register(&m48t59_rtc) < 0) + printk(KERN_ERR "Registering RTC device failed\n"); + + return 0; +} + +static const struct of_device_id clock_match[] = { + { + .name = "eeprom", + }, + {}, +}; + +static struct platform_driver clock_driver = { + .probe = clock_probe, + .driver = { + .name = "rtc", + .of_match_table = clock_match, + }, +}; + + +/* Probe for the mostek real time clock chip. */ +static int __init clock_init(void) +{ + return platform_driver_register(&clock_driver); +} +/* Must be after subsys_initcall() so that busses are probed. Must + * be before device_initcall() because things like the RTC driver + * need to see the clock registers. + */ +fs_initcall(clock_init); + +static void __init sparc32_late_time_init(void) +{ + if (sparc_config.features & FEAT_L10_CLOCKEVENT) + setup_timer_ce(); + if (sparc_config.features & FEAT_L10_CLOCKSOURCE) + setup_timer_cs(); +#ifdef CONFIG_SMP + register_percpu_ce(smp_processor_id()); +#endif +} + +static void __init sbus_time_init(void) +{ + sparc_config.get_cycles_offset = sbus_cycles_offset; + sparc_config.init_timers(); +} + +void __init time_init(void) +{ + sparc_config.features = 0; + late_time_init = sparc32_late_time_init; + + if (pcic_present()) + pci_time_init(); + else + sbus_time_init(); +} + diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c new file mode 100644 index 000000000..89fb05f90 --- /dev/null +++ b/arch/sparc/kernel/time_64.c @@ -0,0 +1,899 @@ +// SPDX-License-Identifier: GPL-2.0 +/* time.c: UltraSparc timer and TOD clock support. + * + * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) + * + * Based largely on code which is: + * + * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "entry.h" +#include "kernel.h" + +DEFINE_SPINLOCK(rtc_lock); + +#ifdef CONFIG_SMP +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + + if (in_lock_functions(pc)) + return regs->u_regs[UREG_RETPC]; + return pc; +} +EXPORT_SYMBOL(profile_pc); +#endif + +static void tick_disable_protection(void) +{ + /* Set things up so user can access tick register for profiling + * purposes. Also workaround BB_ERRATA_1 by doing a dummy + * read back of %tick after writing it. 
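+ * (TICK_PRIV_BIT is the privileged-access bit; clearing it below is
+ * what lets user mode read %tick directly.)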
+ */ + __asm__ __volatile__( + " ba,pt %%xcc, 1f\n" + " nop\n" + " .align 64\n" + "1: rd %%tick, %%g2\n" + " add %%g2, 6, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wrpr %%g2, 0, %%tick\n" + " rdpr %%tick, %%g0" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g2"); +} + +static void tick_disable_irq(void) +{ + __asm__ __volatile__( + " ba,pt %%xcc, 1f\n" + " nop\n" + " .align 64\n" + "1: wr %0, 0x0, %%tick_cmpr\n" + " rd %%tick_cmpr, %%g0" + : /* no outputs */ + : "r" (TICKCMP_IRQ_BIT)); +} + +static void tick_init_tick(void) +{ + tick_disable_protection(); + tick_disable_irq(); +} + +static unsigned long long tick_get_tick(void) +{ + unsigned long ret; + + __asm__ __volatile__("rd %%tick, %0\n\t" + "mov %0, %0" + : "=r" (ret)); + + return ret & ~TICK_PRIV_BIT; +} + +static int tick_add_compare(unsigned long adj) +{ + unsigned long orig_tick, new_tick, new_compare; + + __asm__ __volatile__("rd %%tick, %0" + : "=r" (orig_tick)); + + orig_tick &= ~TICKCMP_IRQ_BIT; + + /* Workaround for Spitfire Errata (#54 I think??), I discovered + * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch + * number 103640. + * + * On Blackbird writes to %tick_cmpr can fail, the + * workaround seems to be to execute the wr instruction + * at the start of an I-cache line, and perform a dummy + * read back from %tick_cmpr right after writing to it. -DaveM + */ + __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" + " add %1, %2, %0\n\t" + ".align 64\n" + "1:\n\t" + "wr %0, 0, %%tick_cmpr\n\t" + "rd %%tick_cmpr, %%g0\n\t" + : "=r" (new_compare) + : "r" (orig_tick), "r" (adj)); + + __asm__ __volatile__("rd %%tick, %0" + : "=r" (new_tick)); + new_tick &= ~TICKCMP_IRQ_BIT; + + return ((long)(new_tick - (orig_tick+adj))) > 0L; +} + +static unsigned long tick_add_tick(unsigned long adj) +{ + unsigned long new_tick; + + /* Also need to handle Blackbird bug here too. 
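+ * See tick_add_compare() above: ideally the write would start an
+ * I-cache line and be followed by a dummy read back of the register.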
*/ + __asm__ __volatile__("rd %%tick, %0\n\t" + "add %0, %1, %0\n\t" + "wrpr %0, 0, %%tick\n\t" + : "=&r" (new_tick) + : "r" (adj)); + + return new_tick; +} + +/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */ +static unsigned long cpuid_to_freq(phandle node, int cpuid) +{ + bool is_cpu_node = false; + unsigned long freq = 0; + char type[128]; + + if (!node) + return freq; + + if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1) + is_cpu_node = (strcmp(type, "cpu") == 0); + + /* try upa-portid then cpuid to get cpuid, see prom_64.c */ + if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid || + prom_getint(node, "cpuid") == cpuid)) + freq = prom_getintdefault(node, "clock-frequency", 0); + if (!freq) + freq = cpuid_to_freq(prom_getchild(node), cpuid); + if (!freq) + freq = cpuid_to_freq(prom_getsibling(node), cpuid); + + return freq; +} + +static unsigned long tick_get_frequency(void) +{ + return cpuid_to_freq(prom_root_node, hard_smp_processor_id()); +} + +static struct sparc64_tick_ops tick_operations __cacheline_aligned = { + .name = "tick", + .init_tick = tick_init_tick, + .disable_irq = tick_disable_irq, + .get_tick = tick_get_tick, + .add_tick = tick_add_tick, + .add_compare = tick_add_compare, + .get_frequency = tick_get_frequency, + .softint_mask = 1UL << 0, +}; + +struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; +EXPORT_SYMBOL(tick_ops); + +static void stick_disable_irq(void) +{ + __asm__ __volatile__( + "wr %0, 0x0, %%asr25" + : /* no outputs */ + : "r" (TICKCMP_IRQ_BIT)); +} + +static void stick_init_tick(void) +{ + /* Writes to the %tick and %stick register are not + * allowed on sun4v. The Hypervisor controls that + * bit, per-strand. + */ + if (tlb_type != hypervisor) { + tick_disable_protection(); + tick_disable_irq(); + + /* Let the user get at STICK too. */ + __asm__ __volatile__( + " rd %%asr24, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wr %%g2, 0, %%asr24" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + } + + stick_disable_irq(); +} + +static unsigned long long stick_get_tick(void) +{ + unsigned long ret; + + __asm__ __volatile__("rd %%asr24, %0" + : "=r" (ret)); + + return ret & ~TICK_PRIV_BIT; +} + +static unsigned long stick_add_tick(unsigned long adj) +{ + unsigned long new_tick; + + __asm__ __volatile__("rd %%asr24, %0\n\t" + "add %0, %1, %0\n\t" + "wr %0, 0, %%asr24\n\t" + : "=&r" (new_tick) + : "r" (adj)); + + return new_tick; +} + +static int stick_add_compare(unsigned long adj) +{ + unsigned long orig_tick, new_tick; + + __asm__ __volatile__("rd %%asr24, %0" + : "=r" (orig_tick)); + orig_tick &= ~TICKCMP_IRQ_BIT; + + __asm__ __volatile__("wr %0, 0, %%asr25" + : /* no outputs */ + : "r" (orig_tick + adj)); + + __asm__ __volatile__("rd %%asr24, %0" + : "=r" (new_tick)); + new_tick &= ~TICKCMP_IRQ_BIT; + + return ((long)(new_tick - (orig_tick+adj))) > 0L; +} + +static unsigned long stick_get_frequency(void) +{ + return prom_getintdefault(prom_root_node, "stick-frequency", 0); +} + +static struct sparc64_tick_ops stick_operations __read_mostly = { + .name = "stick", + .init_tick = stick_init_tick, + .disable_irq = stick_disable_irq, + .get_tick = stick_get_tick, + .add_tick = stick_add_tick, + .add_compare = stick_add_compare, + .get_frequency = stick_get_frequency, + .softint_mask = 1UL << 16, +}; + +/* On Hummingbird the STICK/STICK_CMPR register is implemented + * in I/O space. 
There are two 64-bit registers each, the + * first holds the low 32-bits of the value and the second holds + * the high 32-bits. + * + * Since STICK is constantly updating, we have to access it carefully. + * + * The sequence we use to read is: + * 1) read high + * 2) read low + * 3) read high again, if it rolled re-read both low and high again. + * + * Writing STICK safely is also tricky: + * 1) write low to zero + * 2) write high + * 3) write low + */ +static unsigned long __hbird_read_stick(void) +{ + unsigned long ret, tmp1, tmp2, tmp3; + unsigned long addr = HBIRD_STICK_ADDR+8; + + __asm__ __volatile__("ldxa [%1] %5, %2\n" + "1:\n\t" + "sub %1, 0x8, %1\n\t" + "ldxa [%1] %5, %3\n\t" + "add %1, 0x8, %1\n\t" + "ldxa [%1] %5, %4\n\t" + "cmp %4, %2\n\t" + "bne,a,pn %%xcc, 1b\n\t" + " mov %4, %2\n\t" + "sllx %4, 32, %4\n\t" + "or %3, %4, %0\n\t" + : "=&r" (ret), "=&r" (addr), + "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3) + : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr)); + + return ret; +} + +static void __hbird_write_stick(unsigned long val) +{ + unsigned long low = (val & 0xffffffffUL); + unsigned long high = (val >> 32UL); + unsigned long addr = HBIRD_STICK_ADDR; + + __asm__ __volatile__("stxa %%g0, [%0] %4\n\t" + "add %0, 0x8, %0\n\t" + "stxa %3, [%0] %4\n\t" + "sub %0, 0x8, %0\n\t" + "stxa %2, [%0] %4" + : "=&r" (addr) + : "0" (addr), "r" (low), "r" (high), + "i" (ASI_PHYS_BYPASS_EC_E)); +} + +static void __hbird_write_compare(unsigned long val) +{ + unsigned long low = (val & 0xffffffffUL); + unsigned long high = (val >> 32UL); + unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL; + + __asm__ __volatile__("stxa %3, [%0] %4\n\t" + "sub %0, 0x8, %0\n\t" + "stxa %2, [%0] %4" + : "=&r" (addr) + : "0" (addr), "r" (low), "r" (high), + "i" (ASI_PHYS_BYPASS_EC_E)); +} + +static void hbtick_disable_irq(void) +{ + __hbird_write_compare(TICKCMP_IRQ_BIT); +} + +static void hbtick_init_tick(void) +{ + tick_disable_protection(); + + /* XXX This seems to be necessary to 'jumpstart' Hummingbird + * XXX into actually sending STICK interrupts. I think because + * XXX of how we store %tick_cmpr in head.S this somehow resets the + * XXX {TICK + STICK} interrupt mux. 
-DaveM
+	 */
+	__hbird_write_stick(__hbird_read_stick());
+
+	hbtick_disable_irq();
+}
+
+static unsigned long long hbtick_get_tick(void)
+{
+	return __hbird_read_stick() & ~TICK_PRIV_BIT;
+}
+
+static unsigned long hbtick_add_tick(unsigned long adj)
+{
+	unsigned long val;
+
+	val = __hbird_read_stick() + adj;
+	__hbird_write_stick(val);
+
+	return val;
+}
+
+static int hbtick_add_compare(unsigned long adj)
+{
+	unsigned long val = __hbird_read_stick();
+	unsigned long val2;
+
+	val &= ~TICKCMP_IRQ_BIT;
+	val += adj;
+	__hbird_write_compare(val);
+
+	val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
+
+	return ((long)(val2 - val)) > 0L;
+}
+
+static unsigned long hbtick_get_frequency(void)
+{
+	return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
+static struct sparc64_tick_ops hbtick_operations __read_mostly = {
+	.name		= "hbtick",
+	.init_tick	= hbtick_init_tick,
+	.disable_irq	= hbtick_disable_irq,
+	.get_tick	= hbtick_get_tick,
+	.add_tick	= hbtick_add_tick,
+	.add_compare	= hbtick_add_compare,
+	.get_frequency	= hbtick_get_frequency,
+	.softint_mask	= 1UL << 0,
+};
+
+unsigned long cmos_regs;
+EXPORT_SYMBOL(cmos_regs);
+
+static struct resource rtc_cmos_resource;
+
+static struct platform_device rtc_cmos_device = {
+	.name		= "rtc_cmos",
+	.id		= -1,
+	.resource	= &rtc_cmos_resource,
+	.num_resources	= 1,
+};
+
+static int rtc_probe(struct platform_device *op)
+{
+	struct resource *r;
+
+	printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n",
+	       op->dev.of_node, op->resource[0].start);
+
+	/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
+	 * up a fake resource so that the probe works for all cases.
+	 * When the RTC is behind an ISA bus it will have IORESOURCE_IO
+	 * already, whereas when it's behind EBUS it will be IORESOURCE_MEM.
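+	 *
+	 * cmos_regs (exported above) keeps the raw physical base around;
+	 * the sparc mc146818 CMOS_READ()/CMOS_WRITE() accessors are
+	 * assumed to resolve their register offsets against it.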
+ */ + + r = &rtc_cmos_resource; + r->flags = IORESOURCE_IO; + r->name = op->resource[0].name; + r->start = op->resource[0].start; + r->end = op->resource[0].end; + + cmos_regs = op->resource[0].start; + return platform_device_register(&rtc_cmos_device); +} + +static const struct of_device_id rtc_match[] = { + { + .name = "rtc", + .compatible = "m5819", + }, + { + .name = "rtc", + .compatible = "isa-m5819p", + }, + { + .name = "rtc", + .compatible = "isa-m5823p", + }, + { + .name = "rtc", + .compatible = "ds1287", + }, + {}, +}; + +static struct platform_driver rtc_driver = { + .probe = rtc_probe, + .driver = { + .name = "rtc", + .of_match_table = rtc_match, + }, +}; + +static struct platform_device rtc_bq4802_device = { + .name = "rtc-bq4802", + .id = -1, + .num_resources = 1, +}; + +static int bq4802_probe(struct platform_device *op) +{ + + printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n", + op->dev.of_node, op->resource[0].start); + + rtc_bq4802_device.resource = &op->resource[0]; + return platform_device_register(&rtc_bq4802_device); +} + +static const struct of_device_id bq4802_match[] = { + { + .name = "rtc", + .compatible = "bq4802", + }, + {}, +}; + +static struct platform_driver bq4802_driver = { + .probe = bq4802_probe, + .driver = { + .name = "bq4802", + .of_match_table = bq4802_match, + }, +}; + +static unsigned char mostek_read_byte(struct device *dev, u32 ofs) +{ + struct platform_device *pdev = to_platform_device(dev); + void __iomem *regs = (void __iomem *) pdev->resource[0].start; + + return readb(regs + ofs); +} + +static void mostek_write_byte(struct device *dev, u32 ofs, u8 val) +{ + struct platform_device *pdev = to_platform_device(dev); + void __iomem *regs = (void __iomem *) pdev->resource[0].start; + + writeb(val, regs + ofs); +} + +static struct m48t59_plat_data m48t59_data = { + .read_byte = mostek_read_byte, + .write_byte = mostek_write_byte, +}; + +static struct platform_device m48t59_rtc = { + .name = "rtc-m48t59", + .id = 0, + .num_resources = 1, + .dev = { + .platform_data = &m48t59_data, + }, +}; + +static int mostek_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + + /* On an Enterprise system there can be multiple mostek clocks. + * We should only match the one that is on the central FHC bus. + */ + if (of_node_name_eq(dp->parent, "fhc") && + !of_node_name_eq(dp->parent->parent, "central")) + return -ENODEV; + + printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n", + dp, op->resource[0].start); + + m48t59_rtc.resource = &op->resource[0]; + return platform_device_register(&m48t59_rtc); +} + +static const struct of_device_id mostek_match[] = { + { + .name = "eeprom", + }, + {}, +}; + +static struct platform_driver mostek_driver = { + .probe = mostek_probe, + .driver = { + .name = "mostek", + .of_match_table = mostek_match, + }, +}; + +static struct platform_device rtc_sun4v_device = { + .name = "rtc-sun4v", + .id = -1, +}; + +static struct platform_device rtc_starfire_device = { + .name = "rtc-starfire", + .id = -1, +}; + +static int __init clock_init(void) +{ + if (this_is_starfire) + return platform_device_register(&rtc_starfire_device); + + if (tlb_type == hypervisor) + return platform_device_register(&rtc_sun4v_device); + + (void) platform_driver_register(&rtc_driver); + (void) platform_driver_register(&mostek_driver); + (void) platform_driver_register(&bq4802_driver); + + return 0; +} + +/* Must be after subsys_initcall() so that busses are probed. 
Must + * be before device_initcall() because things like the RTC driver + * need to see the clock registers. + */ +fs_initcall(clock_init); + +/* Return true if this is Hummingbird, aka Ultra-IIe */ +static bool is_hummingbird(void) +{ + unsigned long ver, manuf, impl; + + __asm__ __volatile__ ("rdpr %%ver, %0" + : "=&r" (ver)); + manuf = ((ver >> 48) & 0xffff); + impl = ((ver >> 32) & 0xffff); + + return (manuf == 0x17 && impl == 0x13); +} + +struct freq_table { + unsigned long clock_tick_ref; + unsigned int ref_freq; +}; +static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 }; + +unsigned long sparc64_get_clock_tick(unsigned int cpu) +{ + struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); + + if (ft->clock_tick_ref) + return ft->clock_tick_ref; + return cpu_data(cpu).clock_tick; +} +EXPORT_SYMBOL(sparc64_get_clock_tick); + +#ifdef CONFIG_CPU_FREQ + +static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + unsigned int cpu; + struct freq_table *ft; + + for_each_cpu(cpu, freq->policy->cpus) { + ft = &per_cpu(sparc64_freq_table, cpu); + + if (!ft->ref_freq) { + ft->ref_freq = freq->old; + ft->clock_tick_ref = cpu_data(cpu).clock_tick; + } + + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + cpu_data(cpu).clock_tick = + cpufreq_scale(ft->clock_tick_ref, ft->ref_freq, + freq->new); + } + } + + return 0; +} + +static struct notifier_block sparc64_cpufreq_notifier_block = { + .notifier_call = sparc64_cpufreq_notifier +}; + +static int __init register_sparc64_cpufreq_notifier(void) +{ + + cpufreq_register_notifier(&sparc64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + return 0; +} + +core_initcall(register_sparc64_cpufreq_notifier); + +#endif /* CONFIG_CPU_FREQ */ + +static int sparc64_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + return tick_operations.add_compare(delta) ? -ETIME : 0; +} + +static int sparc64_timer_shutdown(struct clock_event_device *evt) +{ + tick_operations.disable_irq(); + return 0; +} + +static struct clock_event_device sparc64_clockevent = { + .features = CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = sparc64_timer_shutdown, + .set_next_event = sparc64_next_event, + .rating = 100, + .shift = 30, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); + +void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + unsigned long tick_mask = tick_operations.softint_mask; + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); + + clear_softint(tick_mask); + + irq_enter(); + + local_cpu_data().irq0_irqs++; + kstat_incr_irq_this_cpu(0); + + if (unlikely(!evt->event_handler)) { + printk(KERN_WARNING + "Spurious SPARC64 timer interrupt on cpu %d\n", cpu); + } else + evt->event_handler(evt); + + irq_exit(); + + set_irq_regs(old_regs); +} + +void setup_sparc64_timer(void) +{ + struct clock_event_device *sevt; + unsigned long pstate; + + /* Guarantee that the following sequences execute + * uninterrupted. + */ + __asm__ __volatile__("rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + + tick_operations.init_tick(); + + /* Restore PSTATE_IE. 
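+	 * Note that wrpr XORs its two source operands, so the wrpr
+	 * above flipped PSTATE_IE off, and this one, with a zero
+	 * second operand, writes the saved value back unchanged.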
*/ + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : /* no outputs */ + : "r" (pstate)); + + sevt = this_cpu_ptr(&sparc64_events); + + memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); + sevt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_register_device(sevt); +} + +#define SPARC64_NSEC_PER_CYC_SHIFT 10UL + +static struct clocksource clocksource_tick = { + .rating = 100, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static unsigned long tb_ticks_per_usec __read_mostly; + +void __delay(unsigned long loops) +{ + unsigned long bclock = get_tick(); + + while ((get_tick() - bclock) < loops) + ; +} +EXPORT_SYMBOL(__delay); + +void udelay(unsigned long usecs) +{ + __delay(tb_ticks_per_usec * usecs); +} +EXPORT_SYMBOL(udelay); + +static u64 clocksource_tick_read(struct clocksource *cs) +{ + return get_tick(); +} + +static void __init get_tick_patch(void) +{ + unsigned int *addr, *instr, i; + struct get_tick_patch *p; + + if (tlb_type == spitfire && is_hummingbird()) + return; + + for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) { + instr = (tlb_type == spitfire) ? p->tick : p->stick; + addr = (unsigned int *)(unsigned long)p->addr; + for (i = 0; i < GET_TICK_NINSTR; i++) { + addr[i] = instr[i]; + /* ensure that address is modified before flush */ + wmb(); + flushi(&addr[i]); + } + } +} + +static void __init init_tick_ops(struct sparc64_tick_ops *ops) +{ + unsigned long freq, quotient, tick; + + freq = ops->get_frequency(); + quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); + tick = ops->get_tick(); + + ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT; + ops->ticks_per_nsec_quotient = quotient; + ops->frequency = freq; + tick_operations = *ops; + get_tick_patch(); +} + +void __init time_init_early(void) +{ + if (tlb_type == spitfire) { + if (is_hummingbird()) { + init_tick_ops(&hbtick_operations); + clocksource_tick.archdata.vclock_mode = VCLOCK_NONE; + } else { + init_tick_ops(&tick_operations); + clocksource_tick.archdata.vclock_mode = VCLOCK_TICK; + } + } else { + init_tick_ops(&stick_operations); + clocksource_tick.archdata.vclock_mode = VCLOCK_STICK; + } +} + +void __init time_init(void) +{ + unsigned long freq; + + freq = tick_operations.frequency; + tb_ticks_per_usec = freq / USEC_PER_SEC; + + clocksource_tick.name = tick_operations.name; + clocksource_tick.read = clocksource_tick_read; + + clocksource_register_hz(&clocksource_tick, freq); + printk("clocksource: mult[%x] shift[%d]\n", + clocksource_tick.mult, clocksource_tick.shift); + + sparc64_clockevent.name = tick_operations.name; + clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); + + sparc64_clockevent.max_delta_ns = + clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); + sparc64_clockevent.max_delta_ticks = 0x7fffffffffffffffUL; + sparc64_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &sparc64_clockevent); + sparc64_clockevent.min_delta_ticks = 0xF; + + printk("clockevent: mult[%x] shift[%d]\n", + sparc64_clockevent.mult, sparc64_clockevent.shift); + + setup_sparc64_timer(); +} + +unsigned long long sched_clock(void) +{ + unsigned long quotient = tick_operations.ticks_per_nsec_quotient; + unsigned long offset = tick_operations.offset; + + /* Use barrier so the compiler emits the loads first and overlaps load + * latency with reading tick, because reading %tick/%stick is a + * post-sync instruction that will flush and restart subsequent + * instructions after it commits. 
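+	 *
+	 * The conversion itself is fixed point:
+	 *
+	 *	ns = ((tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset
+	 *
+	 * where init_tick_ops() precomputed quotient as
+	 * clocksource_hz2mult(freq, 10); at a 1 GHz tick, for instance,
+	 * quotient is 1024 and one tick maps to exactly one nanosecond.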
+ */ + barrier(); + + return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset; +} + +int read_current_timer(unsigned long *timer_val) +{ + *timer_val = get_tick(); + return 0; +} diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S new file mode 100644 index 000000000..82fafeeb3 --- /dev/null +++ b/arch/sparc/kernel/trampoline_32.S @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * trampoline.S: SMP cpu boot-up trampoline code. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + .globl sun4m_cpu_startup + .globl sun4d_cpu_startup + + .align 4 + +/* When we start up a cpu for the first time it enters this routine. + * This initializes the chip from whatever state the prom left it + * in and sets PIL in %psr to 15, no irqs. + */ + +sun4m_cpu_startup: +cpu1_startup: + sethi %hi(trapbase_cpu1), %g3 + b 1f + or %g3, %lo(trapbase_cpu1), %g3 + +cpu2_startup: + sethi %hi(trapbase_cpu2), %g3 + b 1f + or %g3, %lo(trapbase_cpu2), %g3 + +cpu3_startup: + sethi %hi(trapbase_cpu3), %g3 + b 1f + or %g3, %lo(trapbase_cpu3), %g3 + +1: + /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ + set (PSR_PIL | PSR_S | PSR_PS), %g1 + wr %g1, 0x0, %psr ! traps off though + WRITE_PAUSE + + /* Our %wim is one behind CWP */ + mov 2, %g1 + wr %g1, 0x0, %wim + WRITE_PAUSE + + /* This identifies "this cpu". */ + wr %g3, 0x0, %tbr + WRITE_PAUSE + + /* Give ourselves a stack and curptr. */ + set current_set, %g5 + srl %g3, 10, %g4 + and %g4, 0xc, %g4 + ld [%g5 + %g4], %g6 + + sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp + or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp + add %g6, %sp, %sp + + /* Turn on traps (PSR_ET). */ + rd %psr, %g1 + wr %g1, PSR_ET, %psr ! traps on + WRITE_PAUSE + + /* Init our caches, etc. */ + set poke_srmmu, %g5 + ld [%g5], %g5 + call %g5 + nop + + /* Start this processor. */ + call smp_callin + nop + + b,a smp_panic + + .text + .align 4 + +smp_panic: + call cpu_panic + nop + +/* CPUID in bootbus can be found at PA 0xff0140000 */ +#define SUN4D_BOOTBUS_CPUID 0xf0140000 + + .align 4 + +sun4d_cpu_startup: + /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ + set (PSR_PIL | PSR_S | PSR_PS), %g1 + wr %g1, 0x0, %psr ! traps off though + WRITE_PAUSE + + /* Our %wim is one behind CWP */ + mov 2, %g1 + wr %g1, 0x0, %wim + WRITE_PAUSE + + /* Set tbr - we use just one trap table. */ + set trapbase, %g1 + wr %g1, 0x0, %tbr + WRITE_PAUSE + + /* Get our CPU id out of bootbus */ + set SUN4D_BOOTBUS_CPUID, %g3 + lduba [%g3] ASI_M_CTL, %g3 + and %g3, 0xf8, %g3 + srl %g3, 3, %g1 + sta %g1, [%g0] ASI_M_VIKING_TMP1 + + /* Give ourselves a stack and curptr. */ + set current_set, %g5 + srl %g3, 1, %g4 + ld [%g5 + %g4], %g6 + + sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp + or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp + add %g6, %sp, %sp + + /* Turn on traps (PSR_ET). */ + rd %psr, %g1 + wr %g1, PSR_ET, %psr ! traps on + WRITE_PAUSE + + /* Init our caches, etc. */ + set poke_srmmu, %g5 + ld [%g5], %g5 + call %g5 + nop + + /* Start this processor. 
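+	 * smp_callin() takes over this cpu and is not expected to
+	 * return; the branch to smp_panic below fires only if it does.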
*/ + call smp_callin + nop + + b,a smp_panic + + .align 4 + .global leon_smp_cpu_startup, smp_penguin_ctable + +leon_smp_cpu_startup: + + set smp_penguin_ctable,%g1 + ld [%g1+4],%g1 + srl %g1,4,%g1 + set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ + sta %g1, [%g5] ASI_LEON_MMUREGS + + /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ + set (PSR_PIL | PSR_S | PSR_PS), %g1 + wr %g1, 0x0, %psr ! traps off though + WRITE_PAUSE + + /* Our %wim is one behind CWP */ + mov 2, %g1 + wr %g1, 0x0, %wim + WRITE_PAUSE + + /* Set tbr - we use just one trap table. */ + set trapbase, %g1 + wr %g1, 0x0, %tbr + WRITE_PAUSE + + /* Get our CPU id */ + rd %asr17,%g3 + + /* Give ourselves a stack and curptr. */ + set current_set, %g5 + srl %g3, 28, %g4 + sll %g4, 2, %g4 + ld [%g5 + %g4], %g6 + + sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp + or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp + add %g6, %sp, %sp + + /* Turn on traps (PSR_ET). */ + rd %psr, %g1 + wr %g1, PSR_ET, %psr ! traps on + WRITE_PAUSE + + /* Init our caches, etc. */ + set poke_srmmu, %g5 + ld [%g5], %g5 + call %g5 + nop + + /* Start this processor. */ + call smp_callin + nop + + b,a smp_panic diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S new file mode 100644 index 000000000..51bf1eb92 --- /dev/null +++ b/arch/sparc/kernel/trampoline_64.S @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * trampoline.S: Jump start slave processors on sparc64. + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + .data + .align 8 +call_method: + .asciz "call-method" + .align 8 +itlb_load: + .asciz "SUNW,itlb-load" + .align 8 +dtlb_load: + .asciz "SUNW,dtlb-load" + +#define TRAMP_STACK_SIZE 1024 + .align 16 +tramp_stack: + .skip TRAMP_STACK_SIZE + + .align 8 + .globl sparc64_cpu_startup, sparc64_cpu_startup_end +sparc64_cpu_startup: + BRANCH_IF_SUN4V(g1, niagara_startup) + BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup) + BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup) + + ba,pt %xcc, spitfire_startup + nop + +cheetah_plus_startup: + /* Preserve OBP chosen DCU and DCR register settings. */ + ba,pt %xcc, cheetah_generic_startup + nop + +cheetah_startup: + mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1 + wr %g1, %asr18 + + sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5 + or %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5 + sllx %g5, 32, %g5 + or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 + stxa %g5, [%g0] ASI_DCU_CONTROL_REG + membar #Sync + /* fallthru */ + +cheetah_generic_startup: + mov TSB_EXTENSION_P, %g3 + stxa %g0, [%g3] ASI_DMMU + stxa %g0, [%g3] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_S, %g3 + stxa %g0, [%g3] ASI_DMMU + membar #Sync + + mov TSB_EXTENSION_N, %g3 + stxa %g0, [%g3] ASI_DMMU + stxa %g0, [%g3] ASI_IMMU + membar #Sync + /* fallthru */ + +niagara_startup: + /* Disable STICK_INT interrupts. */ + sethi %hi(0x80000000), %g5 + sllx %g5, 32, %g5 + wr %g5, %asr25 + + ba,pt %xcc, startup_continue + nop + +spitfire_startup: + mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1 + stxa %g1, [%g0] ASI_LSU_CONTROL + membar #Sync + +startup_continue: + mov %o0, %l0 + BRANCH_IF_SUN4V(g1, niagara_lock_tlb) + + sethi %hi(0x80000000), %g2 + sllx %g2, 32, %g2 + wr %g2, 0, %tick_cmpr + + /* Call OBP by hand to lock KERNBASE into i/d tlbs. 
+ * We lock 'num_kernel_image_mappings' consecutive entries.
+ */
+	sethi		%hi(prom_entry_lock), %g2
+1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
+	brnz,pn		%g1, 1b
+	 nop
+
+	/* Get onto temporary stack which will be in the locked
+	 * kernel image.
+	 */
+	sethi		%hi(tramp_stack), %g1
+	or		%g1, %lo(tramp_stack), %g1
+	add		%g1, TRAMP_STACK_SIZE, %g1
+	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+	flushw
+
+	/* Setup the loop variables:
+	 * %l3: VADDR base
+	 * %l4: TTE base
+	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+	 * %l6: Number of TTE entries to map
+	 * %l7: Highest TTE entry number, we count down
+	 */
+	sethi		%hi(KERNBASE), %l3
+	sethi		%hi(kern_locked_tte_data), %l4
+	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr		%l5
+	sethi		%hi(num_kernel_image_mappings), %l6
+	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
+
+	mov		15, %l7
+	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+	mov		63, %l7
+2:
+
+3:
+	/* Lock into I-MMU */
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(itlb_load), %g2
+	or		%g2, %lo(itlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(prom_mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+
+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx		%l5, 22, %g1
+
+	add		%l3, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add		%l4, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE
+
+	/* TTE index is highest minus loop index. */
+	sub		%l7, %l5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x38]
+
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x08], %o1
+	call		%o1
+	 add		%sp, (2047 + 128), %o0
+
+	/* Lock into D-MMU */
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(dtlb_load), %g2
+	or		%g2, %lo(dtlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(prom_mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+
+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx		%l5, 22, %g1
+
+	add		%l3, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add		%l4, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE
+
+	/* TTE index is highest minus loop index.
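+	 * E.g. on cheetah %l7 is 15, so mapping three 4MB pages uses
+	 * entries 15, 14 and 13, counting down from the top of the TLB.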
*/ + sub %l7, %l5, %g2 + stx %g2, [%sp + 2047 + 128 + 0x38] + + sethi %hi(p1275buf), %g2 + or %g2, %lo(p1275buf), %g2 + ldx [%g2 + 0x08], %o1 + call %o1 + add %sp, (2047 + 128), %o0 + + add %l5, 1, %l5 + cmp %l5, %l6 + bne,pt %xcc, 3b + nop + + sethi %hi(prom_entry_lock), %g2 + stb %g0, [%g2 + %lo(prom_entry_lock)] + + ba,pt %xcc, after_lock_tlb + nop + +niagara_lock_tlb: + sethi %hi(KERNBASE), %l3 + sethi %hi(kern_locked_tte_data), %l4 + ldx [%l4 + %lo(kern_locked_tte_data)], %l4 + clr %l5 + sethi %hi(num_kernel_image_mappings), %l6 + lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 + +1: + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sllx %l5, 22, %g2 + add %l3, %g2, %o0 + clr %o1 + add %l4, %g2, %o2 + mov HV_MMU_IMMU, %o3 + ta HV_FAST_TRAP + + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sllx %l5, 22, %g2 + add %l3, %g2, %o0 + clr %o1 + add %l4, %g2, %o2 + mov HV_MMU_DMMU, %o3 + ta HV_FAST_TRAP + + add %l5, 1, %l5 + cmp %l5, %l6 + bne,pt %xcc, 1b + nop + +after_lock_tlb: + wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate + wr %g0, 0, %fprs + + wr %g0, ASI_P, %asi + + mov PRIMARY_CONTEXT, %g7 + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous + + membar #Sync + mov SECONDARY_CONTEXT, %g7 + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous + + membar #Sync + + /* Everything we do here, until we properly take over the + * trap table, must be done with extreme care. We cannot + * make any references to %g6 (current thread pointer), + * %g4 (current task pointer), or %g5 (base of current cpu's + * per-cpu area) until we properly take over the trap table + * from the firmware and hypervisor. + * + * Get onto temporary stack which is in the locked kernel image. + */ + sethi %hi(tramp_stack), %g1 + or %g1, %lo(tramp_stack), %g1 + add %g1, TRAMP_STACK_SIZE, %g1 + sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp + mov 0, %fp + + /* Put garbage in these registers to trap any access to them. */ + set 0xdeadbeef, %g4 + set 0xdeadbeef, %g5 + set 0xdeadbeef, %g6 + + call init_irqwork_curcpu + nop + + sethi %hi(tlb_type), %g3 + lduw [%g3 + %lo(tlb_type)], %g2 + cmp %g2, 3 + bne,pt %icc, 1f + nop + + call hard_smp_processor_id + nop + + call sun4v_register_mondo_queues + nop + +1: call init_cur_cpu_trap + ldx [%l0], %o0 + + /* Start using proper page size encodings in ctx register. */ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + membar #Sync + + wrpr %g0, 0, %wstate + + sethi %hi(prom_entry_lock), %g2 +1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 + brnz,pn %g1, 1b + nop + + /* As a hack, put &init_thread_union into %g6. + * prom_world() loads from here to restore the %asi + * register. 
+ */ + sethi %hi(init_thread_union), %g6 + or %g6, %lo(init_thread_union), %g6 + + sethi %hi(is_sun4v), %o0 + lduw [%o0 + %lo(is_sun4v)], %o0 + brz,pt %o0, 2f + nop + + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + stxa %g2, [%g0] ASI_SCRATCHPAD + + /* Compute physical address: + * + * paddr = kern_base + (mmfsa_vaddr - KERNBASE) + */ + sethi %hi(KERNBASE), %g3 + sub %g2, %g3, %g2 + sethi %hi(kern_base), %g3 + ldx [%g3 + %lo(kern_base)], %g3 + add %g2, %g3, %o1 + sethi %hi(sparc64_ttable_tl0), %o0 + + set prom_set_trap_table_name, %g2 + stx %g2, [%sp + 2047 + 128 + 0x00] + mov 2, %g2 + stx %g2, [%sp + 2047 + 128 + 0x08] + mov 0, %g2 + stx %g2, [%sp + 2047 + 128 + 0x10] + stx %o0, [%sp + 2047 + 128 + 0x18] + stx %o1, [%sp + 2047 + 128 + 0x20] + sethi %hi(p1275buf), %g2 + or %g2, %lo(p1275buf), %g2 + ldx [%g2 + 0x08], %o1 + call %o1 + add %sp, (2047 + 128), %o0 + + ba,pt %xcc, 3f + nop + +2: sethi %hi(sparc64_ttable_tl0), %o0 + set prom_set_trap_table_name, %g2 + stx %g2, [%sp + 2047 + 128 + 0x00] + mov 1, %g2 + stx %g2, [%sp + 2047 + 128 + 0x08] + mov 0, %g2 + stx %g2, [%sp + 2047 + 128 + 0x10] + stx %o0, [%sp + 2047 + 128 + 0x18] + sethi %hi(p1275buf), %g2 + or %g2, %lo(p1275buf), %g2 + ldx [%g2 + 0x08], %o1 + call %o1 + add %sp, (2047 + 128), %o0 + +3: sethi %hi(prom_entry_lock), %g2 + stb %g0, [%g2 + %lo(prom_entry_lock)] + + ldx [%l0], %g6 + ldx [%g6 + TI_TASK], %g4 + + mov 1, %g5 + sllx %g5, THREAD_SHIFT, %g5 + sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 + add %g6, %g5, %sp + + rdpr %pstate, %o1 + or %o1, PSTATE_IE, %o1 + wrpr %o1, 0, %pstate + + call smp_callin + nop + + call cpu_panic + nop +1: b,a,pt %xcc, 1b + + .align 8 +sparc64_cpu_startup_end: diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c new file mode 100644 index 000000000..179aabfa7 --- /dev/null +++ b/arch/sparc/kernel/traps_32.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sparc/kernel/traps.c + * + * Copyright 1995, 2008 David S. Miller (davem@davemloft.net) + * Copyright 2000 Jakub Jelinek (jakub@redhat.com) + */ + +/* + * I hate traps on the sparc, grrr... + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "entry.h" +#include "kernel.h" + +/* #define TRAP_DEBUG */ + +static void instruction_dump(unsigned long *pc) +{ + int i; + + if((((unsigned long) pc) & 3)) + return; + + for(i = -3; i < 6; i++) + printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>'); + printk("\n"); +} + +#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") +#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") + +void __noreturn die_if_kernel(char *str, struct pt_regs *regs) +{ + static int die_counter; + int count = 0; + + /* Amuse the user. */ + printk( +" \\|/ ____ \\|/\n" +" \"@'/ ,. \\`@\"\n" +" /_| \\__/ |_\\\n" +" \\__U_/\n"); + + printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); + show_regs(regs); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + + __SAVE; __SAVE; __SAVE; __SAVE; + __SAVE; __SAVE; __SAVE; __SAVE; + __RESTORE; __RESTORE; __RESTORE; __RESTORE; + __RESTORE; __RESTORE; __RESTORE; __RESTORE; + + { + struct reg_window32 *rw = (struct reg_window32 *)regs->u_regs[UREG_FP]; + + /* Stop the back trace when we hit userland or we + * find some badly aligned kernel stack. Set an upper + * bound in case our stack is trashed and we loop. 
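+		 * (The bound here is 30 frames; the & 0x7 test below also
+		 * rejects frame pointers that are not 8-byte aligned.)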
+ */ + while(rw && + count++ < 30 && + (((unsigned long) rw) >= PAGE_OFFSET) && + !(((unsigned long) rw) & 0x7)) { + printk("Caller[%08lx]: %pS\n", rw->ins[7], + (void *) rw->ins[7]); + rw = (struct reg_window32 *)rw->ins[6]; + } + } + printk("Instruction DUMP:"); + instruction_dump ((unsigned long *) regs->pc); + make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV); +} + +void do_hw_interrupt(struct pt_regs *regs, unsigned long type) +{ + if(type < 0x80) { + /* Sun OS's puke from bad traps, Linux survives! */ + printk("Unimplemented Sparc TRAP, type = %02lx\n", type); + die_if_kernel("Whee... Hello Mr. Penguin", regs); + } + + if(regs->psr & PSR_PS) + die_if_kernel("Kernel bad trap", regs); + + force_sig_fault_trapno(SIGILL, ILL_ILLTRP, + (void __user *)regs->pc, type - 0x80); +} + +void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + if(psr & PSR_PS) + die_if_kernel("Kernel illegal instruction", regs); +#ifdef TRAP_DEBUG + printk("Ill instr. at pc=%08lx instruction is %08lx\n", + regs->pc, *(unsigned long *)regs->pc); +#endif + + send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current); +} + +void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + if(psr & PSR_PS) + die_if_kernel("Penguin instruction from Penguin mode??!?!", regs); + send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current); +} + +/* XXX User may want to be allowed to do this. XXX */ + +void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + if(regs->psr & PSR_PS) { + printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc, + regs->u_regs[UREG_RETPC]); + die_if_kernel("BOGUS", regs); + /* die_if_kernel("Kernel MNA access", regs); */ + } +#if 0 + show_regs (regs); + instruction_dump ((unsigned long *) regs->pc); + printk ("do_MNA!\n"); +#endif + send_sig_fault(SIGBUS, BUS_ADRALN, + /* FIXME: Should dig out mna address */ (void *)0, + current); +} + +static unsigned long init_fsr = 0x0UL; +static unsigned long init_fregs[32] __attribute__ ((aligned (8))) = + { ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, + ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, + ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, + ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL }; + +void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + /* Sanity check... */ + if(psr & PSR_PS) + die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs); + + put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */ + regs->psr |= PSR_EF; +#ifndef CONFIG_SMP + if(last_task_used_math == current) + return; + if(last_task_used_math) { + /* Other processes fpu state, save away */ + struct task_struct *fptask = last_task_used_math; + fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr, + &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth); + } + last_task_used_math = current; + if(used_math()) { + fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); + } else { + /* Set initial sane state. 
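+		 * init_fregs is filled with ~0, which reads back as quiet
+		 * NaNs, so a read of a register the task never wrote
+		 * stands out.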
*/ + fpload(&init_fregs[0], &init_fsr); + set_used_math(); + } +#else + if(!used_math()) { + fpload(&init_fregs[0], &init_fsr); + set_used_math(); + } else { + fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); + } + set_thread_flag(TIF_USEDFPU); +#endif +} + +static unsigned long fake_regs[32] __attribute__ ((aligned (8))); +static unsigned long fake_fsr; +static unsigned long fake_queue[32] __attribute__ ((aligned (8))); +static unsigned long fake_depth; + +void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + static int calls; + unsigned long fsr; + int ret = 0; + int code; +#ifndef CONFIG_SMP + struct task_struct *fpt = last_task_used_math; +#else + struct task_struct *fpt = current; +#endif + put_psr(get_psr() | PSR_EF); + /* If nobody owns the fpu right now, just clear the + * error into our fake static buffer and hope it don't + * happen again. Thank you crashme... + */ +#ifndef CONFIG_SMP + if(!fpt) { +#else + if (!test_tsk_thread_flag(fpt, TIF_USEDFPU)) { +#endif + fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); + regs->psr &= ~PSR_EF; + return; + } + fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr, + &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth); +#ifdef DEBUG_FPU + printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr); +#endif + + switch ((fpt->thread.fsr & 0x1c000)) { + /* switch on the contents of the ftt [floating point trap type] field */ +#ifdef DEBUG_FPU + case (1 << 14): + printk("IEEE_754_exception\n"); + break; +#endif + case (2 << 14): /* unfinished_FPop (underflow & co) */ + case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */ + ret = do_mathemu(regs, fpt); + break; +#ifdef DEBUG_FPU + case (4 << 14): + printk("sequence_error (OS bug...)\n"); + break; + case (5 << 14): + printk("hardware_error (uhoh!)\n"); + break; + case (6 << 14): + printk("invalid_fp_register (user error)\n"); + break; +#endif /* DEBUG_FPU */ + } + /* If we successfully emulated the FPop, we pretend the trap never happened :-> */ + if (ret) { + fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); + return; + } + /* nope, better SIGFPE the offending process... */ + +#ifdef CONFIG_SMP + clear_tsk_thread_flag(fpt, TIF_USEDFPU); +#endif + if(psr & PSR_PS) { + /* The first fsr store/load we tried trapped, + * the second one will not (we hope). + */ + printk("WARNING: FPU exception from kernel mode. 
at pc=%08lx\n", + regs->pc); + regs->pc = regs->npc; + regs->npc += 4; + calls++; + if(calls > 2) + die_if_kernel("Too many Penguin-FPU traps from kernel mode", + regs); + return; + } + + fsr = fpt->thread.fsr; + code = FPE_FLTUNK; + if ((fsr & 0x1c000) == (1 << 14)) { + if (fsr & 0x10) + code = FPE_FLTINV; + else if (fsr & 0x08) + code = FPE_FLTOVF; + else if (fsr & 0x04) + code = FPE_FLTUND; + else if (fsr & 0x02) + code = FPE_FLTDIV; + else if (fsr & 0x01) + code = FPE_FLTRES; + } + send_sig_fault(SIGFPE, code, (void __user *)pc, fpt); +#ifndef CONFIG_SMP + last_task_used_math = NULL; +#endif + regs->psr &= ~PSR_EF; + if(calls > 0) + calls=0; +} + +void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + if(psr & PSR_PS) + die_if_kernel("Penguin overflow trap from kernel mode", regs); + send_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)pc, current); +} + +void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ +#ifdef TRAP_DEBUG + printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n", + pc, npc, psr); +#endif + if(psr & PSR_PS) + panic("Tell me what a watchpoint trap is, and I'll then deal " + "with such a beast..."); +} + +void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ +#ifdef TRAP_DEBUG + printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n", + pc, npc, psr); +#endif + force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)pc); +} + +void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current); +} + +void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ +#ifdef TRAP_DEBUG + printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n", + pc, npc, psr); +#endif + send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current); +} + +void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc, + unsigned long psr) +{ + send_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, current); +} + +#ifdef CONFIG_DEBUG_BUGVERBOSE +void do_BUG(const char *file, int line) +{ + // bust_spinlocks(1); XXX Not in our original BUG() + printk("kernel BUG at %s:%d!\n", file, line); +} +EXPORT_SYMBOL(do_BUG); +#endif + +/* Since we have our mappings set up, on multiprocessors we can spin them + * up here so that timer interrupts work during initialization. 
+ */
+
+void trap_init(void)
+{
+	extern void thread_info_offsets_are_bolixed_pete(void);
+
+	/* Force linker to barf if mismatched */
+	if (TI_UWINMASK    != offsetof(struct thread_info, uwinmask) ||
+	    TI_TASK        != offsetof(struct thread_info, task) ||
+	    TI_FLAGS       != offsetof(struct thread_info, flags) ||
+	    TI_CPU         != offsetof(struct thread_info, cpu) ||
+	    TI_PREEMPT     != offsetof(struct thread_info, preempt_count) ||
+	    TI_SOFTIRQ     != offsetof(struct thread_info, softirq_count) ||
+	    TI_HARDIRQ     != offsetof(struct thread_info, hardirq_count) ||
+	    TI_KSP         != offsetof(struct thread_info, ksp) ||
+	    TI_KPC         != offsetof(struct thread_info, kpc) ||
+	    TI_KPSR        != offsetof(struct thread_info, kpsr) ||
+	    TI_KWIM        != offsetof(struct thread_info, kwim) ||
+	    TI_REG_WINDOW  != offsetof(struct thread_info, reg_window) ||
+	    TI_RWIN_SPTRS  != offsetof(struct thread_info, rwbuf_stkptrs) ||
+	    TI_W_SAVED     != offsetof(struct thread_info, w_saved))
+		thread_info_offsets_are_bolixed_pete();
+
+	/* Attach to the address space of init_task. */
+	mmgrab(&init_mm);
+	current->active_mm = &init_mm;
+
+	/* NOTE: Other cpus have this done as they are started
+	 *       up on SMP.
+	 */
+}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
new file mode 100644
index 000000000..5b4de4a89
--- /dev/null
+++ b/arch/sparc/kernel/traps_64.c
@@ -0,0 +1,2924 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* arch/sparc64/kernel/traps.c
+ *
+ * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
+ */
+
+/*
+ * I like traps on v9, :))))
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "entry.h"
+#include "kernel.h"
+#include "kstack.h"
+
+/* When an irrecoverable trap occurs at tl > 0, the trap entry
+ * code logs the trap state registers at every level in the trap
+ * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
+ * is as follows:
+ */
+struct tl1_traplog {
+	struct {
+		unsigned long tstate;
+		unsigned long tpc;
+		unsigned long tnpc;
+		unsigned long tt;
+	} trapstack[4];
+	unsigned long tl;
+};
+
+static void dump_tl1_traplog(struct tl1_traplog *p)
+{
+	int i, limit;
+
+	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
+	       "dumping trap stack.\n", p->tl);
+
+	limit = (tlb_type == hypervisor) ?
2 : 4; + for (i = 0; i < limit; i++) { + printk(KERN_EMERG + "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " + "TNPC[%016lx] TT[%lx]\n", + i + 1, + p->trapstack[i].tstate, p->trapstack[i].tpc, + p->trapstack[i].tnpc, p->trapstack[i].tt); + printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); + } +} + +void bad_trap(struct pt_regs *regs, long lvl) +{ + char buffer[36]; + + if (notify_die(DIE_TRAP, "bad trap", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + + if (lvl < 0x100) { + sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl); + die_if_kernel(buffer, regs); + } + + lvl -= 0x100; + if (regs->tstate & TSTATE_PRIV) { + sprintf(buffer, "Kernel bad sw trap %lx", lvl); + die_if_kernel(buffer, regs); + } + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault_trapno(SIGILL, ILL_ILLTRP, + (void __user *)regs->tpc, lvl); +} + +void bad_trap_tl1(struct pt_regs *regs, long lvl) +{ + char buffer[36]; + + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + sprintf (buffer, "Bad trap %lx at tl>0", lvl); + die_if_kernel (buffer, regs); +} + +#ifdef CONFIG_DEBUG_BUGVERBOSE +void do_BUG(const char *file, int line) +{ + bust_spinlocks(1); + printk("kernel BUG at %s:%d!\n", file, line); +} +EXPORT_SYMBOL(do_BUG); +#endif + +static DEFINE_SPINLOCK(dimm_handler_lock); +static dimm_printer_t dimm_handler; + +static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen) +{ + unsigned long flags; + int ret = -ENODEV; + + spin_lock_irqsave(&dimm_handler_lock, flags); + if (dimm_handler) { + ret = dimm_handler(synd_code, paddr, buf, buflen); + } else if (tlb_type == spitfire) { + if (prom_getunumber(synd_code, paddr, buf, buflen) == -1) + ret = -EINVAL; + else + ret = 0; + } else + ret = -ENODEV; + spin_unlock_irqrestore(&dimm_handler_lock, flags); + + return ret; +} + +int register_dimm_printer(dimm_printer_t func) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dimm_handler_lock, flags); + if (!dimm_handler) + dimm_handler = func; + else + ret = -EEXIST; + spin_unlock_irqrestore(&dimm_handler_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(register_dimm_printer); + +void unregister_dimm_printer(dimm_printer_t func) +{ + unsigned long flags; + + spin_lock_irqsave(&dimm_handler_lock, flags); + if (dimm_handler == func) + dimm_handler = NULL; + spin_unlock_irqrestore(&dimm_handler_lock, flags); +} +EXPORT_SYMBOL_GPL(unregister_dimm_printer); + +void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "instruction access exception", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + goto out; + + if (regs->tstate & TSTATE_PRIV) { + printk("spitfire_insn_access_exception: SFSR[%016lx] " + "SFAR[%016lx], going.\n", sfsr, sfar); + die_if_kernel("Iax", regs); + } + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)regs->tpc); +out: + exception_exit(prev_state); +} + +void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) +{ + if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + spitfire_insn_access_exception(regs, sfsr, sfar); +} 
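+
+/* sun4v packs the MMU fault type into the upper 16 bits of type_ctx
+ * and the context number into the low 16; the sun4v handlers below
+ * unpack it as type = type_ctx >> 16, ctx = type_ctx & 0xffff.
+ */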
+ +void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + unsigned short type = (type_ctx >> 16); + unsigned short ctx = (type_ctx & 0xffff); + + if (notify_die(DIE_TRAP, "instruction access exception", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + printk("sun4v_insn_access_exception: ADDR[%016lx] " + "CTX[%04x] TYPE[%04x], going.\n", + addr, ctx, type); + die_if_kernel("Iax", regs); + } + + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr); +} + +void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + sun4v_insn_access_exception(regs, addr, type_ctx); +} + +bool is_no_fault_exception(struct pt_regs *regs) +{ + unsigned char asi; + u32 insn; + + if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT) + return false; + + /* + * Must do a little instruction decoding here in order to + * decide on a course of action. The bits of interest are: + * insn[31:30] = op, where 3 indicates the load/store group + * insn[24:19] = op3, which identifies individual opcodes + * insn[13] indicates an immediate offset + * op3[4]=1 identifies alternate space instructions + * op3[5:4]=3 identifies floating point instructions + * op3[2]=1 identifies stores + * See "Opcode Maps" in the appendix of any Sparc V9 + * architecture spec for full details. + */ + if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */ + if (insn & 0x2000) /* immediate offset */ + asi = (regs->tstate >> 24); /* saved %asi */ + else + asi = (insn >> 5); /* immediate asi */ + if ((asi & 0xf6) == ASI_PNF) { + if (insn & 0x200000) /* op3[2], stores */ + return false; + if (insn & 0x1000000) /* op3[5:4]=3 (fp) */ + handle_ldf_stq(insn, regs); + else + handle_ld_nf(insn, regs); + return true; + } + } + return false; +} + +void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "data access exception", regs, + 0, 0x30, SIGTRAP) == NOTIFY_STOP) + goto out; + + if (regs->tstate & TSTATE_PRIV) { + /* Test if this comes from uaccess places. */ + const struct exception_table_entry *entry; + + entry = search_exception_tables(regs->tpc); + if (entry) { + /* Ouch, somebody is trying VM hole tricks on us... */ +#ifdef DEBUG_EXCEPTIONS + printk("Exception: PC<%016lx> faddr\n", regs->tpc); + printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", + regs->tpc, entry->fixup); +#endif + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + goto out; + } + /* Shit... 
 */
+		printk("spitfire_data_access_exception: SFSR[%016lx] "
+		       "SFAR[%016lx], going.\n", sfsr, sfar);
+		die_if_kernel("Dax", regs);
+	}
+
+	if (is_no_fault_exception(regs))
+		return;
+
+	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar);
+out:
+	exception_exit(prev_state);
+}
+
+void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
+{
+	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	spitfire_data_access_exception(regs, sfsr, sfar);
+}
+
+void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+	unsigned short type = (type_ctx >> 16);
+	unsigned short ctx  = (type_ctx & 0xffff);
+
+	if (notify_die(DIE_TRAP, "data access exception", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		/* Test if this comes from uaccess places. */
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			/* Ouch, somebody is trying VM hole tricks on us... */
+#ifdef DEBUG_EXCEPTIONS
+			printk("Exception: PC<%016lx> faddr\n", regs->tpc);
+			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+			       regs->tpc, entry->fixup);
+#endif
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		printk("sun4v_data_access_exception: ADDR[%016lx] "
+		       "CTX[%04x] TYPE[%04x], going.\n",
+		       addr, ctx, type);
+		die_if_kernel("Dax", regs);
+	}
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	if (is_no_fault_exception(regs))
+		return;
+
+	/* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
+	 * is vectored through the data access exception trap with fault type
+	 * set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
+	 * Accessing an address with invalid ASI for the address, for
+	 * example setting an ADI tag on an address with ASI_MCD_PRIMARY
+	 * when TTE.mcd is not set for the VA, is also vectored into the
+	 * kernel by HV as data access exception with fault type set to
+	 * HV_FAULT_TYPE_INV_ASI.
+	 */
+	switch (type) {
+	case HV_FAULT_TYPE_INV_ASI:
+		force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr);
+		break;
+	case HV_FAULT_TYPE_MCD_DIS:
+		force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr);
+		break;
+	default:
+		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr);
+		break;
+	}
+}
+
+void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	sun4v_data_access_exception(regs, addr, type_ctx);
+}
+
+#ifdef CONFIG_PCI
+#include "pci_impl.h"
+#endif
+
+/* When access exceptions happen, we must do this. */
+static void spitfire_clean_and_reenable_l1_caches(void)
+{
+	unsigned long va;
+
+	if (tlb_type != spitfire)
+		BUG();
+
+	/* Clean 'em. */
+	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
+		spitfire_put_icache_tag(va, 0x0);
+		spitfire_put_dcache_tag(va, 0x0);
+	}
+
+	/* Re-enable in LSU.
*/ + __asm__ __volatile__("flush %%g6\n\t" + "membar #Sync\n\t" + "stxa %0, [%%g0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC | + LSU_CONTROL_IM | LSU_CONTROL_DM), + "i" (ASI_LSU_CONTROL) + : "memory"); +} + +static void spitfire_enable_estate_errors(void) +{ + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (ESTATE_ERR_ALL), + "i" (ASI_ESTATE_ERROR_EN)); +} + +static char ecc_syndrome_table[] = { + 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49, + 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a, + 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48, + 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c, + 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48, + 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29, + 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b, + 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48, + 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48, + 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e, + 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b, + 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, + 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36, + 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48, + 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48, + 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, + 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48, + 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b, + 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32, + 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48, + 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b, + 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, + 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48, + 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, + 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49, + 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48, + 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48, + 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, + 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48, + 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, + 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b, + 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a +}; + +static char *syndrome_unknown = ""; + +static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit) +{ + unsigned short scode; + char memmod_str[64], *p; + + if (udbl & bit) { + scode = ecc_syndrome_table[udbl & 0xff]; + if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) + p = syndrome_unknown; + else + p = memmod_str; + printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] " + "Memory Module \"%s\"\n", + smp_processor_id(), scode, p); + } + + if (udbh & bit) { + scode = ecc_syndrome_table[udbh & 0xff]; + if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) + p = syndrome_unknown; + else + p = memmod_str; + printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] " + "Memory Module \"%s\"\n", + smp_processor_id(), scode, p); + } + +} + +static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs) +{ + + printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " + "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n", + smp_processor_id(), afsr, afar, udbl, udbh, tl1); + + spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE); + + /* We always log it, even if someone is listening for this + * trap. + */ + notify_die(DIE_TRAP, "Correctable ECC Error", regs, + 0, TRAP_TYPE_CEE, SIGTRAP); + + /* The Correctable ECC Error trap does not disable I/D caches. 
+	 * we only have to restore the ESTATE Error Enable register.
+	 */
+	spitfire_enable_estate_errors();
+}
+
+static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
+{
+	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
+	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
+	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
+
+	/* XXX add more human friendly logging of the error status
+	 * XXX as is implemented for cheetah
+	 */
+
+	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
+
+	/* We always log it, even if someone is listening for this
+	 * trap.
+	 */
+	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
+		   0, tt, SIGTRAP);
+
+	if (regs->tstate & TSTATE_PRIV) {
+		if (tl1)
+			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+		die_if_kernel("UE", regs);
+	}
+
+	/* XXX need more intelligent processing here, such as is implemented
+	 * XXX for cheetah errors, in fact if the E-cache still holds the
+	 * XXX line with bad parity this will loop
+	 */
+
+	spitfire_clean_and_reenable_l1_caches();
+	spitfire_enable_estate_errors();
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0);
+}
+
+void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
+{
+	unsigned long afsr, tt, udbh, udbl;
+	int tl1;
+
+	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
+	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
+	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
+	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
+	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
+
+#ifdef CONFIG_PCI
+	if (tt == TRAP_TYPE_DAE &&
+	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		spitfire_clean_and_reenable_l1_caches();
+		spitfire_enable_estate_errors();
+
+		pci_poke_faulted = 1;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	if (afsr & SFAFSR_UE)
+		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
+
+	if (tt == TRAP_TYPE_CEE) {
+		/* Handle the case where we took a CEE trap, but ACK'd
+		 * only the UE state in the UDB error registers.
+		 */
+		if (afsr & SFAFSR_UE) {
+			if (udbh & UDBE_CE) {
+				__asm__ __volatile__(
+					"stxa %0, [%1] %2\n\t"
+					"membar #Sync"
+					: /* no outputs */
+					: "r" (udbh & UDBE_CE),
+					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
+			}
+			if (udbl & UDBE_CE) {
+				__asm__ __volatile__(
+					"stxa %0, [%1] %2\n\t"
+					"membar #Sync"
+					: /* no outputs */
+					: "r" (udbl & UDBE_CE),
+					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
+			}
+		}
+
+		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
+	}
+}
+
+int cheetah_pcache_forced_on;
+
+void cheetah_enable_pcache(void)
+{
+	unsigned long dcr;
+
+	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
+	       smp_processor_id());
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			     : "=r" (dcr)
+			     : "i" (ASI_DCU_CONTROL_REG));
+	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
+}
+
+/* Cheetah error trap handling. */
+static unsigned long ecache_flush_physbase;
+static unsigned long ecache_flush_linesize;
+static unsigned long ecache_flush_size;
+
+/* This table is ordered in priority of errors and matches the
+ * AFAR overwrite policy as well.
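+ * Because entries are sorted from most to least severe, "first match
+ * wins" is the priority rule; a minimal scan looks like:
+ *
+ *	for (i = 0; table[i].mask; i++)
+ *		if (afsr & table[i].mask)
+ *			return table[i].mask;
+ *
+ * which is what cheetah_get_hipri() below implements.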
+ */ + +struct afsr_error_table { + unsigned long mask; + const char *name; +}; + +static const char CHAFSR_PERR_msg[] = + "System interface protocol error"; +static const char CHAFSR_IERR_msg[] = + "Internal processor error"; +static const char CHAFSR_ISAP_msg[] = + "System request parity error on incoming address"; +static const char CHAFSR_UCU_msg[] = + "Uncorrectable E-cache ECC error for ifetch/data"; +static const char CHAFSR_UCC_msg[] = + "SW Correctable E-cache ECC error for ifetch/data"; +static const char CHAFSR_UE_msg[] = + "Uncorrectable system bus data ECC error for read"; +static const char CHAFSR_EDU_msg[] = + "Uncorrectable E-cache ECC error for stmerge/blkld"; +static const char CHAFSR_EMU_msg[] = + "Uncorrectable system bus MTAG error"; +static const char CHAFSR_WDU_msg[] = + "Uncorrectable E-cache ECC error for writeback"; +static const char CHAFSR_CPU_msg[] = + "Uncorrectable ECC error for copyout"; +static const char CHAFSR_CE_msg[] = + "HW corrected system bus data ECC error for read"; +static const char CHAFSR_EDC_msg[] = + "HW corrected E-cache ECC error for stmerge/blkld"; +static const char CHAFSR_EMC_msg[] = + "HW corrected system bus MTAG ECC error"; +static const char CHAFSR_WDC_msg[] = + "HW corrected E-cache ECC error for writeback"; +static const char CHAFSR_CPC_msg[] = + "HW corrected ECC error for copyout"; +static const char CHAFSR_TO_msg[] = + "Unmapped error from system bus"; +static const char CHAFSR_BERR_msg[] = + "Bus error response from system bus"; +static const char CHAFSR_IVC_msg[] = + "HW corrected system bus data ECC error for ivec read"; +static const char CHAFSR_IVU_msg[] = + "Uncorrectable system bus data ECC error for ivec read"; +static struct afsr_error_table __cheetah_error_table[] = { + { CHAFSR_PERR, CHAFSR_PERR_msg }, + { CHAFSR_IERR, CHAFSR_IERR_msg }, + { CHAFSR_ISAP, CHAFSR_ISAP_msg }, + { CHAFSR_UCU, CHAFSR_UCU_msg }, + { CHAFSR_UCC, CHAFSR_UCC_msg }, + { CHAFSR_UE, CHAFSR_UE_msg }, + { CHAFSR_EDU, CHAFSR_EDU_msg }, + { CHAFSR_EMU, CHAFSR_EMU_msg }, + { CHAFSR_WDU, CHAFSR_WDU_msg }, + { CHAFSR_CPU, CHAFSR_CPU_msg }, + { CHAFSR_CE, CHAFSR_CE_msg }, + { CHAFSR_EDC, CHAFSR_EDC_msg }, + { CHAFSR_EMC, CHAFSR_EMC_msg }, + { CHAFSR_WDC, CHAFSR_WDC_msg }, + { CHAFSR_CPC, CHAFSR_CPC_msg }, + { CHAFSR_TO, CHAFSR_TO_msg }, + { CHAFSR_BERR, CHAFSR_BERR_msg }, + /* These two do not update the AFAR. 
*/ + { CHAFSR_IVC, CHAFSR_IVC_msg }, + { CHAFSR_IVU, CHAFSR_IVU_msg }, + { 0, NULL }, +}; +static const char CHPAFSR_DTO_msg[] = + "System bus unmapped error for prefetch/storequeue-read"; +static const char CHPAFSR_DBERR_msg[] = + "System bus error for prefetch/storequeue-read"; +static const char CHPAFSR_THCE_msg[] = + "Hardware corrected E-cache Tag ECC error"; +static const char CHPAFSR_TSCE_msg[] = + "SW handled correctable E-cache Tag ECC error"; +static const char CHPAFSR_TUE_msg[] = + "Uncorrectable E-cache Tag ECC error"; +static const char CHPAFSR_DUE_msg[] = + "System bus uncorrectable data ECC error due to prefetch/store-fill"; +static struct afsr_error_table __cheetah_plus_error_table[] = { + { CHAFSR_PERR, CHAFSR_PERR_msg }, + { CHAFSR_IERR, CHAFSR_IERR_msg }, + { CHAFSR_ISAP, CHAFSR_ISAP_msg }, + { CHAFSR_UCU, CHAFSR_UCU_msg }, + { CHAFSR_UCC, CHAFSR_UCC_msg }, + { CHAFSR_UE, CHAFSR_UE_msg }, + { CHAFSR_EDU, CHAFSR_EDU_msg }, + { CHAFSR_EMU, CHAFSR_EMU_msg }, + { CHAFSR_WDU, CHAFSR_WDU_msg }, + { CHAFSR_CPU, CHAFSR_CPU_msg }, + { CHAFSR_CE, CHAFSR_CE_msg }, + { CHAFSR_EDC, CHAFSR_EDC_msg }, + { CHAFSR_EMC, CHAFSR_EMC_msg }, + { CHAFSR_WDC, CHAFSR_WDC_msg }, + { CHAFSR_CPC, CHAFSR_CPC_msg }, + { CHAFSR_TO, CHAFSR_TO_msg }, + { CHAFSR_BERR, CHAFSR_BERR_msg }, + { CHPAFSR_DTO, CHPAFSR_DTO_msg }, + { CHPAFSR_DBERR, CHPAFSR_DBERR_msg }, + { CHPAFSR_THCE, CHPAFSR_THCE_msg }, + { CHPAFSR_TSCE, CHPAFSR_TSCE_msg }, + { CHPAFSR_TUE, CHPAFSR_TUE_msg }, + { CHPAFSR_DUE, CHPAFSR_DUE_msg }, + /* These two do not update the AFAR. */ + { CHAFSR_IVC, CHAFSR_IVC_msg }, + { CHAFSR_IVU, CHAFSR_IVU_msg }, + { 0, NULL }, +}; +static const char JPAFSR_JETO_msg[] = + "System interface protocol error, hw timeout caused"; +static const char JPAFSR_SCE_msg[] = + "Parity error on system snoop results"; +static const char JPAFSR_JEIC_msg[] = + "System interface protocol error, illegal command detected"; +static const char JPAFSR_JEIT_msg[] = + "System interface protocol error, illegal ADTYPE detected"; +static const char JPAFSR_OM_msg[] = + "Out of range memory error has occurred"; +static const char JPAFSR_ETP_msg[] = + "Parity error on L2 cache tag SRAM"; +static const char JPAFSR_UMS_msg[] = + "Error due to unsupported store"; +static const char JPAFSR_RUE_msg[] = + "Uncorrectable ECC error from remote cache/memory"; +static const char JPAFSR_RCE_msg[] = + "Correctable ECC error from remote cache/memory"; +static const char JPAFSR_BP_msg[] = + "JBUS parity error on returned read data"; +static const char JPAFSR_WBP_msg[] = + "JBUS parity error on data for writeback or block store"; +static const char JPAFSR_FRC_msg[] = + "Foreign read to DRAM incurring correctable ECC error"; +static const char JPAFSR_FRU_msg[] = + "Foreign read to DRAM incurring uncorrectable ECC error"; +static struct afsr_error_table __jalapeno_error_table[] = { + { JPAFSR_JETO, JPAFSR_JETO_msg }, + { JPAFSR_SCE, JPAFSR_SCE_msg }, + { JPAFSR_JEIC, JPAFSR_JEIC_msg }, + { JPAFSR_JEIT, JPAFSR_JEIT_msg }, + { CHAFSR_PERR, CHAFSR_PERR_msg }, + { CHAFSR_IERR, CHAFSR_IERR_msg }, + { CHAFSR_ISAP, CHAFSR_ISAP_msg }, + { CHAFSR_UCU, CHAFSR_UCU_msg }, + { CHAFSR_UCC, CHAFSR_UCC_msg }, + { CHAFSR_UE, CHAFSR_UE_msg }, + { CHAFSR_EDU, CHAFSR_EDU_msg }, + { JPAFSR_OM, JPAFSR_OM_msg }, + { CHAFSR_WDU, CHAFSR_WDU_msg }, + { CHAFSR_CPU, CHAFSR_CPU_msg }, + { CHAFSR_CE, CHAFSR_CE_msg }, + { CHAFSR_EDC, CHAFSR_EDC_msg }, + { JPAFSR_ETP, JPAFSR_ETP_msg }, + { CHAFSR_WDC, CHAFSR_WDC_msg }, + { CHAFSR_CPC, CHAFSR_CPC_msg }, + { CHAFSR_TO, CHAFSR_TO_msg 
},
+	{ CHAFSR_BERR, CHAFSR_BERR_msg },
+	{ JPAFSR_UMS, JPAFSR_UMS_msg },
+	{ JPAFSR_RUE, JPAFSR_RUE_msg },
+	{ JPAFSR_RCE, JPAFSR_RCE_msg },
+	{ JPAFSR_BP, JPAFSR_BP_msg },
+	{ JPAFSR_WBP, JPAFSR_WBP_msg },
+	{ JPAFSR_FRC, JPAFSR_FRC_msg },
+	{ JPAFSR_FRU, JPAFSR_FRU_msg },
+	/* These two do not update the AFAR. */
+	{ CHAFSR_IVU, CHAFSR_IVU_msg },
+	{ 0, NULL },
+};
+static struct afsr_error_table *cheetah_error_table;
+static unsigned long cheetah_afsr_errors;
+
+struct cheetah_err_info *cheetah_error_log;
+
+static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
+{
+	struct cheetah_err_info *p;
+	int cpu = smp_processor_id();
+
+	if (!cheetah_error_log)
+		return NULL;
+
+	p = cheetah_error_log + (cpu * 2);
+	if ((afsr & CHAFSR_TL1) != 0UL)
+		p++;
+
+	return p;
+}
+
+extern unsigned int tl0_icpe[], tl1_icpe[];
+extern unsigned int tl0_dcpe[], tl1_dcpe[];
+extern unsigned int tl0_fecc[], tl1_fecc[];
+extern unsigned int tl0_cee[], tl1_cee[];
+extern unsigned int tl0_iae[], tl1_iae[];
+extern unsigned int tl0_dae[], tl1_dae[];
+extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
+extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
+extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
+extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
+extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
+
+void __init cheetah_ecache_flush_init(void)
+{
+	unsigned long largest_size, smallest_linesize, order, ver;
+	int i, sz;
+
+	/* Scan all cpu device tree nodes, note two values:
+	 * 1) largest E-cache size
+	 * 2) smallest E-cache line size
+	 */
+	largest_size = 0UL;
+	smallest_linesize = ~0UL;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		unsigned long val;
+
+		val = cpu_data(i).ecache_size;
+		if (!val)
+			continue;
+
+		if (val > largest_size)
+			largest_size = val;
+
+		val = cpu_data(i).ecache_line_size;
+		if (val < smallest_linesize)
+			smallest_linesize = val;
+
+	}
+
+	if (largest_size == 0UL || smallest_linesize == ~0UL) {
+		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
+			    "parameters.\n");
+		prom_halt();
+	}
+
+	ecache_flush_size = (2 * largest_size);
+	ecache_flush_linesize = smallest_linesize;
+
+	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
+
+	if (ecache_flush_physbase == ~0UL) {
+		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
+			    "contiguous physical memory.\n",
+			    ecache_flush_size);
+		prom_halt();
+	}
+
+	/* Now allocate error trap reporting scoreboard. */
+	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+	for (order = 0; order < MAX_ORDER; order++) {
+		if ((PAGE_SIZE << order) >= sz)
+			break;
+	}
+	cheetah_error_log = (struct cheetah_err_info *)
+		__get_free_pages(GFP_KERNEL, order);
+	if (!cheetah_error_log) {
+		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
+			    "error logging scoreboard (%d bytes).\n", sz);
+		prom_halt();
+	}
+	memset(cheetah_error_log, 0, PAGE_SIZE << order);
+
+	/* Mark all AFSRs as invalid so that the trap handler will
+	 * log new information there.
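+	 * The scoreboard layout is two cheetah_err_info slots per cpu,
+	 * the second reserved for TL>0 traps, matching the lookup in
+	 * cheetah_get_error_log() above:
+	 *
+	 *	p = cheetah_error_log + (cpu * 2);
+	 *	if (afsr & CHAFSR_TL1)
+	 *		p++;
+	 *
+	 * A slot whose afsr reads CHAFSR_INVALID is free to be filled
+	 * by the next error trap.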
+ */ + for (i = 0; i < 2 * NR_CPUS; i++) + cheetah_error_log[i].afsr = CHAFSR_INVALID; + + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID) { + cheetah_error_table = &__jalapeno_error_table[0]; + cheetah_afsr_errors = JPAFSR_ERRORS; + } else if ((ver >> 32) == 0x003e0015) { + cheetah_error_table = &__cheetah_plus_error_table[0]; + cheetah_afsr_errors = CHPAFSR_ERRORS; + } else { + cheetah_error_table = &__cheetah_error_table[0]; + cheetah_afsr_errors = CHAFSR_ERRORS; + } + + /* Now patch trap tables. */ + memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4)); + memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4)); + memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4)); + memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4)); + memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4)); + memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4)); + memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4)); + memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4)); + if (tlb_type == cheetah_plus) { + memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4)); + memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4)); + memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4)); + memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4)); + } + flushi(PAGE_OFFSET); +} + +static void cheetah_flush_ecache(void) +{ + unsigned long flush_base = ecache_flush_physbase; + unsigned long flush_linesize = ecache_flush_linesize; + unsigned long flush_size = ecache_flush_size; + + __asm__ __volatile__("1: subcc %0, %4, %0\n\t" + " bne,pt %%xcc, 1b\n\t" + " ldxa [%2 + %0] %3, %%g0\n\t" + : "=&r" (flush_size) + : "0" (flush_size), "r" (flush_base), + "i" (ASI_PHYS_USE_EC), "r" (flush_linesize)); +} + +static void cheetah_flush_ecache_line(unsigned long physaddr) +{ + unsigned long alias; + + physaddr &= ~(8UL - 1UL); + physaddr = (ecache_flush_physbase + + (physaddr & ((ecache_flush_size>>1UL) - 1UL))); + alias = physaddr + (ecache_flush_size >> 1UL); + __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t" + "ldxa [%1] %2, %%g0\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (physaddr), "r" (alias), + "i" (ASI_PHYS_USE_EC)); +} + +/* Unfortunately, the diagnostic access to the I-cache tags we need to + * use to clear the thing interferes with I-cache coherency transactions. + * + * So we must only flush the I-cache when it is disabled. + */ +static void __cheetah_flush_icache(void) +{ + unsigned int icache_size, icache_line_size; + unsigned long addr; + + icache_size = local_cpu_data().icache_size; + icache_line_size = local_cpu_data().icache_line_size; + + /* Clear the valid bits in all the tags. */ + for (addr = 0; addr < icache_size; addr += icache_line_size) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (addr | (2 << 3)), + "i" (ASI_IC_TAG)); + } +} + +static void cheetah_flush_icache(void) +{ + unsigned long dcu_save; + + /* Save current DCU, disable I-cache. 
*/ + __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t" + "or %0, %2, %%g1\n\t" + "stxa %%g1, [%%g0] %1\n\t" + "membar #Sync" + : "=r" (dcu_save) + : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC) + : "g1"); + + __cheetah_flush_icache(); + + /* Restore DCU register */ + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG)); +} + +static void cheetah_flush_dcache(void) +{ + unsigned int dcache_size, dcache_line_size; + unsigned long addr; + + dcache_size = local_cpu_data().dcache_size; + dcache_line_size = local_cpu_data().dcache_line_size; + + for (addr = 0; addr < dcache_size; addr += dcache_line_size) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (addr), "i" (ASI_DCACHE_TAG)); + } +} + +/* In order to make the even parity correct we must do two things. + * First, we clear DC_data_parity and set DC_utag to an appropriate value. + * Next, we clear out all 32-bytes of data for that line. Data of + * all-zero + tag parity value of zero == correct parity. + */ +static void cheetah_plus_zap_dcache_parity(void) +{ + unsigned int dcache_size, dcache_line_size; + unsigned long addr; + + dcache_size = local_cpu_data().dcache_size; + dcache_line_size = local_cpu_data().dcache_line_size; + + for (addr = 0; addr < dcache_size; addr += dcache_line_size) { + unsigned long tag = (addr >> 14); + unsigned long line; + + __asm__ __volatile__("membar #Sync\n\t" + "stxa %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (tag), "r" (addr), + "i" (ASI_DCACHE_UTAG)); + for (line = addr; line < addr + dcache_line_size; line += 8) + __asm__ __volatile__("membar #Sync\n\t" + "stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (line), + "i" (ASI_DCACHE_DATA)); + } +} + +/* Conversion tables used to frob Cheetah AFSR syndrome values into + * something palatable to the memory controller driver get_unumber + * routine. 
+ */
+#define MT0	137
+#define MT1	138
+#define MT2	139
+#define NONE	254
+#define MTC0	140
+#define MTC1	141
+#define MTC2	142
+#define MTC3	143
+#define C0	128
+#define C1	129
+#define C2	130
+#define C3	131
+#define C4	132
+#define C5	133
+#define C6	134
+#define C7	135
+#define C8	136
+#define M2	144
+#define M3	145
+#define M4	146
+#define M	147
+static unsigned char cheetah_ecc_syntab[] = {
+/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
+/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
+/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
+/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
+/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
+/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
+/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
+/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
+/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
+/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
+/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
+/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
+/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
+/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
+/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
+/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
+/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
+/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
+/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
+/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
+/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
+/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
+/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
+/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
+/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
+/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
+/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
+/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
+/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
+/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
+/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
+/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
+};
+static unsigned char cheetah_mtag_syntab[] = {
+	NONE, MTC0,
+	MTC1, NONE,
+	MTC2, NONE,
+	NONE, MT0,
+	MTC3, NONE,
+	NONE, MT1,
+	NONE, MT2,
+	NONE, NONE
+};
+
+/* Return the highest priority error condition mentioned. */
+static inline unsigned long cheetah_get_hipri(unsigned long afsr)
+{
+	unsigned long tmp = 0;
+	int i;
+
+	for (i = 0; cheetah_error_table[i].mask; i++) {
+		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
+			return tmp;
+	}
+	return tmp;
+}
+
+static const char *cheetah_get_string(unsigned long bit)
+{
+	int i;
+
+	for (i = 0; cheetah_error_table[i].mask; i++) {
+		if ((bit & cheetah_error_table[i].mask) != 0UL)
+			return cheetah_error_table[i].name;
+	}
+	return "???";
+}
+
+static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
+			       unsigned long afsr, unsigned long afar, int recoverable)
+{
+	unsigned long hipri;
+	char unum[256];
+
+	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
+	       (recoverable ?
KERN_WARNING : KERN_CRIT), smp_processor_id(), + afsr, afar, + (afsr & CHAFSR_TL1) ? 1 : 0); + printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); + printk("%s" "ERROR(%d): ", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); + printk("TPC<%pS>\n", (void *) regs->tpc); + printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, + (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT, + (afsr & CHAFSR_ME) ? ", Multiple Errors" : "", + (afsr & CHAFSR_PRIV) ? ", Privileged" : ""); + hipri = cheetah_get_hipri(afsr); + printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + hipri, cheetah_get_string(hipri)); + + /* Try to get unumber if relevant. */ +#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \ + CHAFSR_CPC | CHAFSR_CPU | \ + CHAFSR_UE | CHAFSR_CE | \ + CHAFSR_EDC | CHAFSR_EDU | \ + CHAFSR_UCC | CHAFSR_UCU | \ + CHAFSR_WDU | CHAFSR_WDC) +#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU) + if (afsr & ESYND_ERRORS) { + int syndrome; + int ret; + + syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT; + syndrome = cheetah_ecc_syntab[syndrome]; + ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum)); + if (ret != -1) + printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), + smp_processor_id(), unum); + } else if (afsr & MSYND_ERRORS) { + int syndrome; + int ret; + + syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT; + syndrome = cheetah_mtag_syntab[syndrome]; + ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum)); + if (ret != -1) + printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), + smp_processor_id(), unum); + } + + /* Now dump the cache snapshots. */ + printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (int) info->dcache_index, + info->dcache_tag, + info->dcache_utag, + info->dcache_stag); + printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + info->dcache_data[0], + info->dcache_data[1], + info->dcache_data[2], + info->dcache_data[3]); + printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] " + "u[%016llx] l[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (int) info->icache_index, + info->icache_tag, + info->icache_utag, + info->icache_stag, + info->icache_upper, + info->icache_lower); + printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + info->icache_data[0], + info->icache_data[1], + info->icache_data[2], + info->icache_data[3]); + printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + info->icache_data[4], + info->icache_data[5], + info->icache_data[6], + info->icache_data[7]); + printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n", + (recoverable ? 
KERN_WARNING : KERN_CRIT), smp_processor_id(), + (int) info->ecache_index, info->ecache_tag); + printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + info->ecache_data[0], + info->ecache_data[1], + info->ecache_data[2], + info->ecache_data[3]); + + afsr = (afsr & ~hipri) & cheetah_afsr_errors; + while (afsr != 0UL) { + unsigned long bit = cheetah_get_hipri(afsr); + + printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n", + (recoverable ? KERN_WARNING : KERN_CRIT), + bit, cheetah_get_string(bit)); + + afsr &= ~bit; + } + + if (!recoverable) + printk(KERN_CRIT "ERROR: This condition is not recoverable.\n"); +} + +static int cheetah_recheck_errors(struct cheetah_err_info *logp) +{ + unsigned long afsr, afar; + int ret = 0; + + __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t" + : "=r" (afsr) + : "i" (ASI_AFSR)); + if ((afsr & cheetah_afsr_errors) != 0) { + if (logp != NULL) { + __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t" + : "=r" (afar) + : "i" (ASI_AFAR)); + logp->afsr = afsr; + logp->afar = afar; + } + ret = 1; + } + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" + "membar #Sync\n\t" + : : "r" (afsr), "i" (ASI_AFSR)); + + return ret; +} + +void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) +{ + struct cheetah_err_info local_snapshot, *p; + int recoverable; + + /* Flush E-cache */ + cheetah_flush_ecache(); + + p = cheetah_get_error_log(afsr); + if (!p) { + prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n", + afsr, afar); + prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", + smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); + prom_halt(); + } + + /* Grab snapshot of logged error. */ + memcpy(&local_snapshot, p, sizeof(local_snapshot)); + + /* If the current trap snapshot does not match what the + * trap handler passed along into our args, big trouble. + * In such a case, mark the local copy as invalid. + * + * Else, it matches and we mark the afsr in the non-local + * copy as invalid so we may log new error traps there. + */ + if (p->afsr != afsr || p->afar != afar) + local_snapshot.afsr = CHAFSR_INVALID; + else + p->afsr = CHAFSR_INVALID; + + cheetah_flush_icache(); + cheetah_flush_dcache(); + + /* Re-enable I-cache/D-cache */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_DCU_CONTROL_REG), + "i" (DCU_DC | DCU_IC) + : "g1"); + + /* Re-enable error reporting */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_ESTATE_ERROR_EN), + "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) + : "g1"); + + /* Decide if we can continue after handling this trap and + * logging the error. + */ + recoverable = 1; + if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) + recoverable = 0; + + /* Re-check AFSR/AFAR. What we are looking for here is whether a new + * error was logged while we had error reporting traps disabled. + */ + if (cheetah_recheck_errors(&local_snapshot)) { + unsigned long new_afsr = local_snapshot.afsr; + + /* If we got a new asynchronous error, die... */ + if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | + CHAFSR_WDU | CHAFSR_CPU | + CHAFSR_IVU | CHAFSR_UE | + CHAFSR_BERR | CHAFSR_TO)) + recoverable = 0; + } + + /* Log errors. 
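+	 * The recoverability test just above reduces to a predicate of
+	 * the form (sketch only, the helper name is hypothetical):
+	 *
+	 *	static bool afsr_fatal(unsigned long afsr)
+	 *	{
+	 *		return afsr & (CHAFSR_PERR | CHAFSR_IERR |
+	 *			       CHAFSR_ISAP);
+	 *	}
+	 *
+	 * combined with the recheck for asynchronous errors that may
+	 * have arrived while error reporting was disabled.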
*/ + cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); + + if (!recoverable) + panic("Irrecoverable Fast-ECC error trap.\n"); + + /* Flush E-cache to kick the error trap handlers out. */ + cheetah_flush_ecache(); +} + +/* Try to fix a correctable error by pushing the line out from + * the E-cache. Recheck error reporting registers to see if the + * problem is intermittent. + */ +static int cheetah_fix_ce(unsigned long physaddr) +{ + unsigned long orig_estate; + unsigned long alias1, alias2; + int ret; + + /* Make sure correctable error traps are disabled. */ + __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t" + "andn %0, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %2\n\t" + "membar #Sync" + : "=&r" (orig_estate) + : "i" (ESTATE_ERROR_CEEN), + "i" (ASI_ESTATE_ERROR_EN) + : "g1"); + + /* We calculate alias addresses that will force the + * cache line in question out of the E-cache. Then + * we bring it back in with an atomic instruction so + * that we get it in some modified/exclusive state, + * then we displace it again to try and get proper ECC + * pushed back into the system. + */ + physaddr &= ~(8UL - 1UL); + alias1 = (ecache_flush_physbase + + (physaddr & ((ecache_flush_size >> 1) - 1))); + alias2 = alias1 + (ecache_flush_size >> 1); + __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t" + "ldxa [%1] %3, %%g0\n\t" + "casxa [%2] %3, %%g0, %%g0\n\t" + "ldxa [%0] %3, %%g0\n\t" + "ldxa [%1] %3, %%g0\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (alias1), "r" (alias2), + "r" (physaddr), "i" (ASI_PHYS_USE_EC)); + + /* Did that trigger another error? */ + if (cheetah_recheck_errors(NULL)) { + /* Try one more time. */ + __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t" + "membar #Sync" + : : "r" (physaddr), "i" (ASI_PHYS_USE_EC)); + if (cheetah_recheck_errors(NULL)) + ret = 2; + else + ret = 1; + } else { + /* No new error, intermittent problem. */ + ret = 0; + } + + /* Restore error enables. */ + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" + "membar #Sync" + : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN)); + + return ret; +} + +/* Return non-zero if PADDR is a valid physical memory address. */ +static int cheetah_check_main_memory(unsigned long paddr) +{ + unsigned long vaddr = PAGE_OFFSET + paddr; + + if (vaddr > (unsigned long) high_memory) + return 0; + + return kern_addr_valid(vaddr); +} + +void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) +{ + struct cheetah_err_info local_snapshot, *p; + int recoverable, is_memory; + + p = cheetah_get_error_log(afsr); + if (!p) { + prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n", + afsr, afar); + prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", + smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); + prom_halt(); + } + + /* Grab snapshot of logged error. */ + memcpy(&local_snapshot, p, sizeof(local_snapshot)); + + /* If the current trap snapshot does not match what the + * trap handler passed along into our args, big trouble. + * In such a case, mark the local copy as invalid. + * + * Else, it matches and we mark the afsr in the non-local + * copy as invalid so we may log new error traps there. + */ + if (p->afsr != afsr || p->afar != afar) + local_snapshot.afsr = CHAFSR_INVALID; + else + p->afsr = CHAFSR_INVALID; + + is_memory = cheetah_check_main_memory(afar); + + if (is_memory && (afsr & CHAFSR_CE) != 0UL) { + /* XXX Might want to log the results of this operation + * XXX somewhere... 
-DaveM + */ + cheetah_fix_ce(afar); + } + + { + int flush_all, flush_line; + + flush_all = flush_line = 0; + if ((afsr & CHAFSR_EDC) != 0UL) { + if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC) + flush_line = 1; + else + flush_all = 1; + } else if ((afsr & CHAFSR_CPC) != 0UL) { + if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC) + flush_line = 1; + else + flush_all = 1; + } + + /* Trap handler only disabled I-cache, flush it. */ + cheetah_flush_icache(); + + /* Re-enable I-cache */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_DCU_CONTROL_REG), + "i" (DCU_IC) + : "g1"); + + if (flush_all) + cheetah_flush_ecache(); + else if (flush_line) + cheetah_flush_ecache_line(afar); + } + + /* Re-enable error reporting */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_ESTATE_ERROR_EN), + "i" (ESTATE_ERROR_CEEN) + : "g1"); + + /* Decide if we can continue after handling this trap and + * logging the error. + */ + recoverable = 1; + if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) + recoverable = 0; + + /* Re-check AFSR/AFAR */ + (void) cheetah_recheck_errors(&local_snapshot); + + /* Log errors. */ + cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); + + if (!recoverable) + panic("Irrecoverable Correctable-ECC error trap.\n"); +} + +void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) +{ + struct cheetah_err_info local_snapshot, *p; + int recoverable, is_memory; + +#ifdef CONFIG_PCI + /* Check for the special PCI poke sequence. */ + if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { + cheetah_flush_icache(); + cheetah_flush_dcache(); + + /* Re-enable I-cache/D-cache */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_DCU_CONTROL_REG), + "i" (DCU_DC | DCU_IC) + : "g1"); + + /* Re-enable error reporting */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_ESTATE_ERROR_EN), + "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) + : "g1"); + + (void) cheetah_recheck_errors(NULL); + + pci_poke_faulted = 1; + regs->tpc += 4; + regs->tnpc = regs->tpc + 4; + return; + } +#endif + + p = cheetah_get_error_log(afsr); + if (!p) { + prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n", + afsr, afar); + prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", + smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); + prom_halt(); + } + + /* Grab snapshot of logged error. */ + memcpy(&local_snapshot, p, sizeof(local_snapshot)); + + /* If the current trap snapshot does not match what the + * trap handler passed along into our args, big trouble. + * In such a case, mark the local copy as invalid. + * + * Else, it matches and we mark the afsr in the non-local + * copy as invalid so we may log new error traps there. 
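+	 *
+	 * In short, the snapshot handshake is:
+	 *
+	 *	if (p->afsr != afsr || p->afar != afar)
+	 *		local_snapshot.afsr = CHAFSR_INVALID;
+	 *	else
+	 *		p->afsr = CHAFSR_INVALID;
+	 *
+	 * with the first branch distrusting a stale snapshot and the
+	 * second releasing the slot for the next trap.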
+ */ + if (p->afsr != afsr || p->afar != afar) + local_snapshot.afsr = CHAFSR_INVALID; + else + p->afsr = CHAFSR_INVALID; + + is_memory = cheetah_check_main_memory(afar); + + { + int flush_all, flush_line; + + flush_all = flush_line = 0; + if ((afsr & CHAFSR_EDU) != 0UL) { + if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU) + flush_line = 1; + else + flush_all = 1; + } else if ((afsr & CHAFSR_BERR) != 0UL) { + if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR) + flush_line = 1; + else + flush_all = 1; + } + + cheetah_flush_icache(); + cheetah_flush_dcache(); + + /* Re-enable I/D caches */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_DCU_CONTROL_REG), + "i" (DCU_IC | DCU_DC) + : "g1"); + + if (flush_all) + cheetah_flush_ecache(); + else if (flush_line) + cheetah_flush_ecache_line(afar); + } + + /* Re-enable error reporting */ + __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" + "or %%g1, %1, %%g1\n\t" + "stxa %%g1, [%%g0] %0\n\t" + "membar #Sync" + : /* no outputs */ + : "i" (ASI_ESTATE_ERROR_EN), + "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) + : "g1"); + + /* Decide if we can continue after handling this trap and + * logging the error. + */ + recoverable = 1; + if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) + recoverable = 0; + + /* Re-check AFSR/AFAR. What we are looking for here is whether a new + * error was logged while we had error reporting traps disabled. + */ + if (cheetah_recheck_errors(&local_snapshot)) { + unsigned long new_afsr = local_snapshot.afsr; + + /* If we got a new asynchronous error, die... */ + if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | + CHAFSR_WDU | CHAFSR_CPU | + CHAFSR_IVU | CHAFSR_UE | + CHAFSR_BERR | CHAFSR_TO)) + recoverable = 0; + } + + /* Log errors. */ + cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); + + /* "Recoverable" here means we try to yank the page from ever + * being newly used again. This depends upon a few things: + * 1) Must be main memory, and AFAR must be valid. + * 2) If we trapped from user, OK. + * 3) Else, if we trapped from kernel we must find exception + * table entry (ie. we have to have been accessing user + * space). + * + * If AFAR is not in main memory, or we trapped from kernel + * and cannot find an exception table entry, it is unacceptable + * to try and continue. + */ + if (recoverable && is_memory) { + if ((regs->tstate & TSTATE_PRIV) == 0UL) { + /* OK, usermode access. */ + recoverable = 1; + } else { + const struct exception_table_entry *entry; + + entry = search_exception_tables(regs->tpc); + if (entry) { + /* OK, kernel access to userspace. */ + recoverable = 1; + + } else { + /* BAD, privileged state is corrupted. */ + recoverable = 0; + } + + if (recoverable) { + if (pfn_valid(afar >> PAGE_SHIFT)) + get_page(pfn_to_page(afar >> PAGE_SHIFT)); + else + recoverable = 0; + + /* Only perform fixup if we still have a + * recoverable condition. + */ + if (recoverable) { + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + } + } + } + } else { + recoverable = 0; + } + + if (!recoverable) + panic("Irrecoverable deferred error trap.\n"); +} + +/* Handle a D/I cache parity error trap. TYPE is encoded as: + * + * Bit0: 0=dcache,1=icache + * Bit1: 0=recoverable,1=unrecoverable + * + * The hardware has disabled both the I-cache and D-cache in + * the %dcr register. 
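+ *
+ * So the handler below effectively decodes TYPE as:
+ *
+ *	bool icache        = type & 0x1;
+ *	bool unrecoverable = type & 0x2;
+ *
+ * flushes the affected cache, re-enables both caches in the DCU, and
+ * panics only in the unrecoverable case.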
+ */
+void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+{
+	if (type & 0x1)
+		__cheetah_flush_icache();
+	else
+		cheetah_plus_zap_dcache_parity();
+	cheetah_flush_dcache();
+
+	/* Re-enable I-cache/D-cache */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_DCU_CONTROL_REG),
+			       "i" (DCU_DC | DCU_IC)
+			     : "g1");
+
+	if (type & 0x2) {
+		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+		       smp_processor_id(),
+		       (type & 0x1) ? 'I' : 'D',
+		       regs->tpc);
+		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
+		panic("Irrecoverable Cheetah+ parity error.");
+	}
+
+	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+	       smp_processor_id(),
+	       (type & 0x1) ? 'I' : 'D',
+	       regs->tpc);
+	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
+}
+
+struct sun4v_error_entry {
+	/* Unique error handle */
+/*0x00*/u64		err_handle;
+
+	/* %stick value at the time of the error */
+/*0x08*/u64		err_stick;
+
+/*0x10*/u8		reserved_1[3];
+
+	/* Error type */
+/*0x13*/u8		err_type;
+#define SUN4V_ERR_TYPE_UNDEFINED	0
+#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
+#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
+#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
+#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
+#define SUN4V_ERR_TYPE_DUMP_CORE	5
+#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
+#define SUN4V_ERR_TYPE_NUM		7
+
+	/* Error attributes */
+/*0x14*/u32		err_attrs;
+#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
+#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
+#define SUN4V_ERR_ATTRS_PIO		0x00000004
+#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
+#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
+#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
+#define SUN4V_ERR_ATTRS_ASR		0x00000040
+#define SUN4V_ERR_ATTRS_ASI		0x00000080
+#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
+#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
+#define SUN4V_ERR_ATTRS_MCD		0x00000800
+#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
+#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
+#define SUN4V_ERR_ATTRS_MODE_SHFT	24
+#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
+
+#define SUN4V_ERR_SPSTATE_FAULTED	0
+#define SUN4V_ERR_SPSTATE_AVAILABLE	1
+#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2
+
+#define SUN4V_ERR_MODE_USER		1
+#define SUN4V_ERR_MODE_PRIV		2
+
+	/* Real address of the memory region or PIO transaction */
+/*0x18*/u64		err_raddr;
+
+	/* Size of the operation triggering the error, in bytes */
+/*0x20*/u32		err_size;
+
+	/* ID of the CPU */
+/*0x24*/u16		err_cpu;
+
+	/* Grace period for shutdown, in seconds */
+/*0x26*/u16		err_secs;
+
+	/* Value of the %asi register */
+/*0x28*/u8		err_asi;
+
+/*0x29*/u8		reserved_2;
+
+	/* Value of the ASR register number */
+/*0x2a*/u16		err_asr;
+#define SUN4V_ERR_ASR_VALID		0x8000
+
+/*0x2c*/u32		reserved_3;
+/*0x30*/u64		reserved_4;
+/*0x38*/u64		reserved_5;
+};
+
+static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+static const char *sun4v_err_type_to_str(u8 type)
+{
+	static const char *types[SUN4V_ERR_TYPE_NUM] = {
+		"undefined",
+		"uncorrected resumable",
+		"precise nonresumable",
+		"deferred nonresumable",
+		"shutdown request",
+		"dump core",
+		"SP state change",
+	};
+
+	if (type < SUN4V_ERR_TYPE_NUM)
+		return types[type];
+
+	return "unknown";
+}
+
+static void sun4v_emit_err_attr_strings(u32 attrs)
+{
+	static const char *attr_names[] = {
+		"processor",
+		"memory",
+		"PIO",
+		"int-registers",
+		"fpu-registers",
+
"shutdown-request", + "ASR", + "ASI", + "priv-reg", + }; + static const char *sp_states[] = { + "sp-faulted", + "sp-available", + "sp-not-present", + "sp-state-reserved", + }; + static const char *modes[] = { + "mode-reserved0", + "user", + "priv", + "mode-reserved1", + }; + u32 sp_state, mode; + int i; + + for (i = 0; i < ARRAY_SIZE(attr_names); i++) { + if (attrs & (1U << i)) { + const char *s = attr_names[i]; + + pr_cont("%s ", s); + } + } + + sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >> + SUN4V_ERR_ATTRS_SPSTATE_SHFT); + pr_cont("%s ", sp_states[sp_state]); + + mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >> + SUN4V_ERR_ATTRS_MODE_SHFT); + pr_cont("%s ", modes[mode]); + + if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) + pr_cont("res-queue-full "); +} + +/* When the report contains a real-address of "-1" it means that the + * hardware did not provide the address. So we compute the effective + * address of the load or store instruction at regs->tpc and report + * that. Usually when this happens it's a PIO and in such a case we + * are using physical addresses with bypass ASIs anyways, so what we + * report here is exactly what we want. + */ +static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) +{ + unsigned int insn; + u64 addr; + + if (!(regs->tstate & TSTATE_PRIV)) + return; + + insn = *(unsigned int *) regs->tpc; + + addr = compute_effective_address(regs, insn, 0); + + printk("%s: insn effective address [0x%016llx]\n", + pfx, addr); +} + +static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, + int cpu, const char *pfx, atomic_t *ocnt) +{ + u64 *raw_ptr = (u64 *) ent; + u32 attrs; + int cnt; + + printk("%s: Reporting on cpu %d\n", pfx, cpu); + printk("%s: TPC [0x%016lx] <%pS>\n", + pfx, regs->tpc, (void *) regs->tpc); + + printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n", + pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]); + printk("%s: %016llx:%016llx:%016llx:%016llx]\n", + pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]); + + printk("%s: handle [0x%016llx] stick [0x%016llx]\n", + pfx, ent->err_handle, ent->err_stick); + + printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type)); + + attrs = ent->err_attrs; + printk("%s: attrs [0x%08x] < ", pfx, attrs); + sun4v_emit_err_attr_strings(attrs); + pr_cont(">\n"); + + /* Various fields in the error report are only valid if + * certain attribute bits are set. + */ + if (attrs & (SUN4V_ERR_ATTRS_MEMORY | + SUN4V_ERR_ATTRS_PIO | + SUN4V_ERR_ATTRS_ASI)) { + printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr); + + if (ent->err_raddr == ~(u64)0) + sun4v_report_real_raddr(pfx, regs); + } + + if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI)) + printk("%s: size [0x%x]\n", pfx, ent->err_size); + + if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR | + SUN4V_ERR_ATTRS_INT_REGISTERS | + SUN4V_ERR_ATTRS_FPU_REGISTERS | + SUN4V_ERR_ATTRS_PRIV_REG)) + printk("%s: cpu[%u]\n", pfx, ent->err_cpu); + + if (attrs & SUN4V_ERR_ATTRS_ASI) + printk("%s: asi [0x%02x]\n", pfx, ent->err_asi); + + if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS | + SUN4V_ERR_ATTRS_FPU_REGISTERS | + SUN4V_ERR_ATTRS_PRIV_REG)) && + (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0) + printk("%s: reg [0x%04x]\n", + pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID); + + show_regs(regs); + + if ((cnt = atomic_read(ocnt)) != 0) { + atomic_set(ocnt, 0); + wmb(); + printk("%s: Queue overflowed %d times.\n", + pfx, cnt); + } +} + +/* Handle memory corruption detected error which is vectored in + * through resumable error trap. 
+ */
+void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
+{
+	if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
+		       SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		/* MCD exception could happen because the task was
+		 * running a system call with MCD enabled and passed a
+		 * non-versioned pointer or pointer with bad version
+		 * tag to the system call. In such cases, hypervisor
+		 * places the address of the offending instruction in the
+		 * resumable error report. This is a deferred error,
+		 * so the read/write that caused the trap was potentially
+		 * retired a long time back and we may have no choice
+		 * but to send SIGSEGV to the process.
+		 */
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			/* Looks like a bad syscall parameter */
+#ifdef DEBUG_EXCEPTIONS
+			pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
+				 regs->tpc);
+			pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+				 ent.err_raddr, entry->fixup);
+#endif
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+	}
+
+	/* Send SIGSEGV to the userspace process with the right signal
+	 * code
+	 */
+	force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr);
+}
+
+/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+ * Log the event and clear the first word of the entry.
+ */
+void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+{
+	enum ctx_state prev_state = exception_enter();
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->resum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry. */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
+		/* We should really take the seconds field of
+		 * the error report and use it for the shutdown
+		 * invocation, but for now do the same thing we
+		 * do for a DS shutdown request.
+		 */
+		pr_info("Shutdown request, %u seconds...\n",
+			local_copy.err_secs);
+		orderly_poweroff(true);
+		goto out;
+	}
+
+	/* If this is a memory corruption detected error vectored in
+	 * by HV through resumable error trap, call the handler
+	 */
+	if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
+		do_mcd_err(regs, local_copy);
+		return;
+	}
+
+	sun4v_log_error(regs, &local_copy, cpu,
+			KERN_ERR "RESUMABLE ERROR",
+			&sun4v_resum_oflow_cnt);
+out:
+	exception_exit(prev_state);
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_resum_overflow(struct pt_regs *regs)
+{
+	atomic_inc(&sun4v_resum_oflow_cnt);
+}
+
+/* Given a set of registers, get the virtual address that was being
+ * accessed by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
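+ *
+ * The intended call pattern is the one in sun4v_nonresum_error()
+ * below:
+ *
+ *	if (!(regs->tstate & TSTATE_PRIV) &&
+ *	    sun4v_nonresum_error_user_handled(regs, &local_copy))
+ *		return;
+ *
+ * i.e. the error is fully answered with a signal and no panic.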
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				       struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		force_sig(SIGKILL);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		force_sig_fault(SIGBUS, BUS_ADRERR,
+				(void __user *)sun4v_get_vaddr(regs));
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
+/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+ * Log the event, clear the first word of the entry, and die.
+ */
+void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
+{
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->nonresum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry. */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
+#ifdef CONFIG_PCI
+	/* Check for the special PCI poke sequence. */
+	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
+		pci_poke_faulted = 1;
+		regs->tpc += 4;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	sun4v_log_error(regs, &local_copy, cpu,
+			KERN_EMERG "NON-RESUMABLE ERROR",
+			&sun4v_nonresum_oflow_cnt);
+
+	panic("Non-resumable error.");
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_nonresum_overflow(struct pt_regs *regs)
+{
+	/* XXX Actually, even this may not make much sense. Perhaps
+	 * XXX we should just pull the plug and panic directly from here?
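+	 *
+	 * The counter is reported and reset the next time an event is
+	 * successfully logged, in sun4v_log_error() above:
+	 *
+	 *	cnt = atomic_read(ocnt);
+	 *	if (cnt) {
+	 *		atomic_set(ocnt, 0);
+	 *		printk("...: Queue overflowed %d times.\n", cnt);
+	 *	}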
+ */ + atomic_inc(&sun4v_nonresum_oflow_cnt); +} + +static void sun4v_tlb_error(struct pt_regs *regs) +{ + die_if_kernel("TLB/TSB error", regs); +} + +unsigned long sun4v_err_itlb_vaddr; +unsigned long sun4v_err_itlb_ctx; +unsigned long sun4v_err_itlb_pte; +unsigned long sun4v_err_itlb_error; + +void sun4v_itlb_error_report(struct pt_regs *regs, int tl) +{ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", + sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, + sun4v_err_itlb_pte, sun4v_err_itlb_error); + + sun4v_tlb_error(regs); +} + +unsigned long sun4v_err_dtlb_vaddr; +unsigned long sun4v_err_dtlb_ctx; +unsigned long sun4v_err_dtlb_pte; +unsigned long sun4v_err_dtlb_error; + +void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) +{ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", + sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, + sun4v_err_dtlb_pte, sun4v_err_dtlb_error); + + sun4v_tlb_error(regs); +} + +void hypervisor_tlbop_error(unsigned long err, unsigned long op) +{ + printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", + err, op); +} + +void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) +{ + printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", + err, op); +} + +static void do_fpe_common(struct pt_regs *regs) +{ + if (regs->tstate & TSTATE_PRIV) { + regs->tpc = regs->tnpc; + regs->tnpc += 4; + } else { + unsigned long fsr = current_thread_info()->xfsr[0]; + int code; + + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + code = FPE_FLTUNK; + if ((fsr & 0x1c000) == (1 << 14)) { + if (fsr & 0x10) + code = FPE_FLTINV; + else if (fsr & 0x08) + code = FPE_FLTOVF; + else if (fsr & 0x04) + code = FPE_FLTUND; + else if (fsr & 0x02) + code = FPE_FLTDIV; + else if (fsr & 0x01) + code = FPE_FLTRES; + } + force_sig_fault(SIGFPE, code, (void __user *)regs->tpc); + } +} + +void do_fpieee(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "fpu exception ieee", regs, + 0, 0x24, SIGFPE) == NOTIFY_STOP) + goto out; + + do_fpe_common(regs); +out: + exception_exit(prev_state); +} + +void do_fpother(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + struct fpustate *f = FPUSTATE; + int ret = 0; + + if (notify_die(DIE_TRAP, "fpu exception other", regs, + 0, 0x25, SIGFPE) == NOTIFY_STOP) + goto out; + + switch ((current_thread_info()->xfsr[0] & 0x1c000)) { + case (2 << 14): /* unfinished_FPop */ + case (3 << 14): /* unimplemented_FPop */ + ret = do_mathemu(regs, f, false); + break; + } + if (ret) + goto out; + do_fpe_common(regs); +out: + exception_exit(prev_state); +} + +void do_tof(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "tagged 
arithmetic overflow", regs, + 0, 0x26, SIGEMT) == NOTIFY_STOP) + goto out; + + if (regs->tstate & TSTATE_PRIV) + die_if_kernel("Penguin overflow trap from kernel mode", regs); + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)regs->tpc); +out: + exception_exit(prev_state); +} + +void do_div0(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "integer division by zero", regs, + 0, 0x28, SIGFPE) == NOTIFY_STOP) + goto out; + + if (regs->tstate & TSTATE_PRIV) + die_if_kernel("TL0: Kernel divide by zero.", regs); + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->tpc); +out: + exception_exit(prev_state); +} + +static void instruction_dump(unsigned int *pc) +{ + int i; + + if ((((unsigned long) pc) & 3)) + return; + + printk("Instruction DUMP:"); + for (i = -3; i < 6; i++) + printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>'); + printk("\n"); +} + +static void user_instruction_dump(unsigned int __user *pc) +{ + int i; + unsigned int buf[9]; + + if ((((unsigned long) pc) & 3)) + return; + + if (copy_from_user(buf, pc - 3, sizeof(buf))) + return; + + printk("Instruction DUMP:"); + for (i = 0; i < 9; i++) + printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>'); + printk("\n"); +} + +void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl) +{ + unsigned long fp, ksp; + struct thread_info *tp; + int count = 0; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + int graph = 0; +#endif + + ksp = (unsigned long) _ksp; + if (!tsk) + tsk = current; + tp = task_thread_info(tsk); + if (ksp == 0UL) { + if (tsk == current) + asm("mov %%fp, %0" : "=r" (ksp)); + else + ksp = tp->ksp; + } + if (tp == current_thread_info()) + flushw_all(); + + fp = ksp + STACK_BIAS; + + printk("%sCall Trace:\n", loglvl); + do { + struct sparc_stackf *sf; + struct pt_regs *regs; + unsigned long pc; + + if (!kstack_valid(tp, fp)) + break; + sf = (struct sparc_stackf *) fp; + regs = (struct pt_regs *) (sf + 1); + + if (kstack_is_trap_frame(tp, regs)) { + if (!(regs->tstate & TSTATE_PRIV)) + break; + pc = regs->tpc; + fp = regs->u_regs[UREG_I6] + STACK_BIAS; + } else { + pc = sf->callers_pc; + fp = (unsigned long)sf->fp + STACK_BIAS; + } + + print_ip_sym(loglvl, pc); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + struct ftrace_ret_stack *ret_stack; + ret_stack = ftrace_graph_get_ret_stack(tsk, graph); + if (ret_stack) { + pc = ret_stack->ret; + print_ip_sym(loglvl, pc); + graph++; + } + } +#endif + } while (++count < 16); +} + +static inline struct reg_window *kernel_stack_up(struct reg_window *rw) +{ + unsigned long fp = rw->ins[6]; + + if (!fp) + return NULL; + + return (struct reg_window *) (fp + STACK_BIAS); +} + +void __noreturn die_if_kernel(char *str, struct pt_regs *regs) +{ + static int die_counter; + int count = 0; + + /* Amuse the user. */ + printk( +" \\|/ ____ \\|/\n" +" \"@'/ .. 
\\`@\"\n" +" /_| \\__/ |_\\\n" +" \\__U_/\n"); + + printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); + notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); + __asm__ __volatile__("flushw"); + show_regs(regs); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + if (regs->tstate & TSTATE_PRIV) { + struct thread_info *tp = current_thread_info(); + struct reg_window *rw = (struct reg_window *) + (regs->u_regs[UREG_FP] + STACK_BIAS); + + /* Stop the back trace when we hit userland or we + * find some badly aligned kernel stack. + */ + while (rw && + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { + printk("Caller[%016lx]: %pS\n", rw->ins[7], + (void *) rw->ins[7]); + + rw = kernel_stack_up(rw); + } + instruction_dump ((unsigned int *) regs->tpc); + } else { + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + user_instruction_dump ((unsigned int __user *) regs->tpc); + } + if (panic_on_oops) + panic("Fatal exception"); + make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV); +} +EXPORT_SYMBOL(die_if_kernel); + +#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) +#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) + +void do_illegal_instruction(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long pc = regs->tpc; + unsigned long tstate = regs->tstate; + u32 insn; + + if (notify_die(DIE_TRAP, "illegal instruction", regs, + 0, 0x10, SIGILL) == NOTIFY_STOP) + goto out; + + if (tstate & TSTATE_PRIV) + die_if_kernel("Kernel illegal instruction", regs); + if (test_thread_flag(TIF_32BIT)) + pc = (u32)pc; + if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { + if (handle_popc(insn, regs)) + goto out; + } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { + if (handle_ldf_stq(insn, regs)) + goto out; + } else if (tlb_type == hypervisor) { + if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { + if (!vis_emul(regs, insn)) + goto out; + } else { + struct fpustate *f = FPUSTATE; + + /* On UltraSPARC T2 and later, FPU insns which + * are not implemented in HW signal an illegal + * instruction trap and do not set the FP Trap + * Trap in the %fsr to unimplemented_FPop. + */ + if (do_mathemu(regs, f, true)) + goto out; + } + } + } + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc); +out: + exception_exit(prev_state); +} + +void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "memory address unaligned", regs, + 0, 0x34, SIGSEGV) == NOTIFY_STOP) + goto out; + + if (regs->tstate & TSTATE_PRIV) { + kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); + goto out; + } + if (is_no_fault_exception(regs)) + return; + + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar); +out: + exception_exit(prev_state); +} + +void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + if (notify_die(DIE_TRAP, "memory address unaligned", regs, + 0, 0x34, SIGSEGV) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); + return; + } + if (is_no_fault_exception(regs)) + return; + + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr); +} + +/* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI + * tag mismatch. 
+ * + * ADI version tag mismatch on a load from memory always results in a + * precise exception. Tag mismatch on a store to memory will result in + * precise exception if MCDPER or PMCDPER is set to 1. + */ +void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr, + unsigned long context) +{ + if (notify_die(DIE_TRAP, "memory corruption precise exception", regs, + 0, 0x8, SIGSEGV) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + /* MCD exception could happen because the task was running + * a system call with MCD enabled and passed a non-versioned + * pointer or pointer with bad version tag to the system + * call. + */ + const struct exception_table_entry *entry; + + entry = search_exception_tables(regs->tpc); + if (entry) { + /* Looks like a bad syscall parameter */ +#ifdef DEBUG_EXCEPTIONS + pr_emerg("Exception: PC<%016lx> faddr\n", + regs->tpc); + pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n", + regs->tpc, entry->fixup); +#endif + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + return; + } + pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n", + __func__, addr, context); + die_if_kernel("MCD precise", regs); + } + + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr); +} + +void do_privop(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, "privileged operation", regs, + 0, 0x11, SIGILL) == NOTIFY_STOP) + goto out; + + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + force_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)regs->tpc); +out: + exception_exit(prev_state); +} + +void do_privact(struct pt_regs *regs) +{ + do_privop(regs); +} + +/* Trap level 1 stuff or other traps we should never see... 
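(Editorial sketch.) sun4v_mem_corrupt_detect_precise() above recovers from a bad syscall argument by consulting the kernel exception tables: if the faulting TPC has an entry, control resumes at its fixup stub rather than oopsing. A hedged sketch of that lookup follows; the struct layout and the linear search are illustrative only (the real table is sorted and searched more cleverly):

    #include <stddef.h>

    struct ex_entry {                       /* hypothetical layout */
            unsigned long insn;             /* address of faulting instruction */
            unsigned long fixup;            /* where to resume */
    };

    static const struct ex_entry *find_fixup(const struct ex_entry *tbl,
                                             unsigned int n, unsigned long tpc)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].insn == tpc)
                            return &tbl[i]; /* then: tpc = fixup, tnpc = fixup + 4 */
            return NULL;
    }
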
*/ +void do_cee(struct pt_regs *regs) +{ + exception_enter(); + die_if_kernel("TL0: Cache Error Exception", regs); +} + +void do_div0_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: DIV0 Exception", regs); +} + +void do_fpieee_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: FPU IEEE Exception", regs); +} + +void do_fpother_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: FPU Other Exception", regs); +} + +void do_ill_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: Illegal Instruction Exception", regs); +} + +void do_irq_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: IRQ Exception", regs); +} + +void do_lddfmna_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: LDDF Exception", regs); +} + +void do_stdfmna_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: STDF Exception", regs); +} + +void do_paw(struct pt_regs *regs) +{ + exception_enter(); + die_if_kernel("TL0: Phys Watchpoint Exception", regs); +} + +void do_paw_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: Phys Watchpoint Exception", regs); +} + +void do_vaw(struct pt_regs *regs) +{ + exception_enter(); + die_if_kernel("TL0: Virt Watchpoint Exception", regs); +} + +void do_vaw_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: Virt Watchpoint Exception", regs); +} + +void do_tof_tl1(struct pt_regs *regs) +{ + exception_enter(); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + die_if_kernel("TL1: Tag Overflow Exception", regs); +} + +void do_getpsr(struct pt_regs *regs) +{ + regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate); + regs->tpc = regs->tnpc; + regs->tnpc += 4; + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } +} + +u64 cpu_mondo_counter[NR_CPUS] = {0}; +struct trap_per_cpu trap_block[NR_CPUS]; +EXPORT_SYMBOL(trap_block); + +/* This can get invoked before sched_init() so play it super safe + * and use hard_smp_processor_id(). + */ +void notrace init_cur_cpu_trap(struct thread_info *t) +{ + int cpu = hard_smp_processor_id(); + struct trap_per_cpu *p = &trap_block[cpu]; + + p->thread = t; + p->pgd_paddr = 0; +} + +extern void thread_info_offsets_are_bolixed_dave(void); +extern void trap_per_cpu_offsets_are_bolixed_dave(void); +extern void tsb_config_offsets_are_bolixed_dave(void); + +/* Only invoked on boot processor. */ +void __init trap_init(void) +{ + /* Compile time sanity check. 
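(Editorial sketch.) The BUILD_BUG_ON() chain that follows cross-checks the asm-offsets constants (TI_*, TRAP_PER_CPU_*, TSB_CONFIG_*) against the live structure layouts, so a field reshuffle breaks the build instead of silently corrupting the hand-written trap entry code that hard-codes those displacements. A minimal sketch of the idiom, with a hypothetical stand-in struct, assuming an LP64 target; _Static_assert plays the role BUILD_BUG_ON() plays below:

    #include <stddef.h>

    struct ti_example {              /* hypothetical stand-in for thread_info */
            void *task;              /* 8 bytes on LP64 */
            unsigned long flags;     /* 8 bytes on LP64 */
            unsigned long ksp;       /* assembly assumes byte offset 16 */
    };

    /* Fails the build the moment the layout drifts from the constant the
     * assembly was written against.
     */
    _Static_assert(offsetof(struct ti_example, ksp) == 16,
                   "asm offset out of sync with struct layout");
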
*/ + BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || + TI_FLAGS != offsetof(struct thread_info, flags) || + TI_CPU != offsetof(struct thread_info, cpu) || + TI_FPSAVED != offsetof(struct thread_info, fpsaved) || + TI_KSP != offsetof(struct thread_info, ksp) || + TI_FAULT_ADDR != offsetof(struct thread_info, + fault_address) || + TI_KREGS != offsetof(struct thread_info, kregs) || + TI_UTRAPS != offsetof(struct thread_info, utraps) || + TI_REG_WINDOW != offsetof(struct thread_info, + reg_window) || + TI_RWIN_SPTRS != offsetof(struct thread_info, + rwbuf_stkptrs) || + TI_GSR != offsetof(struct thread_info, gsr) || + TI_XFSR != offsetof(struct thread_info, xfsr) || + TI_PRE_COUNT != offsetof(struct thread_info, + preempt_count) || + TI_NEW_CHILD != offsetof(struct thread_info, new_child) || + TI_KUNA_REGS != offsetof(struct thread_info, + kern_una_regs) || + TI_KUNA_INSN != offsetof(struct thread_info, + kern_una_insn) || + TI_FPREGS != offsetof(struct thread_info, fpregs) || + (TI_FPREGS & (64 - 1))); + + BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, + thread) || + (TRAP_PER_CPU_PGD_PADDR != + offsetof(struct trap_per_cpu, pgd_paddr)) || + (TRAP_PER_CPU_CPU_MONDO_PA != + offsetof(struct trap_per_cpu, cpu_mondo_pa)) || + (TRAP_PER_CPU_DEV_MONDO_PA != + offsetof(struct trap_per_cpu, dev_mondo_pa)) || + (TRAP_PER_CPU_RESUM_MONDO_PA != + offsetof(struct trap_per_cpu, resum_mondo_pa)) || + (TRAP_PER_CPU_RESUM_KBUF_PA != + offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || + (TRAP_PER_CPU_NONRESUM_MONDO_PA != + offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || + (TRAP_PER_CPU_NONRESUM_KBUF_PA != + offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || + (TRAP_PER_CPU_FAULT_INFO != + offsetof(struct trap_per_cpu, fault_info)) || + (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != + offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || + (TRAP_PER_CPU_CPU_LIST_PA != + offsetof(struct trap_per_cpu, cpu_list_pa)) || + (TRAP_PER_CPU_TSB_HUGE != + offsetof(struct trap_per_cpu, tsb_huge)) || + (TRAP_PER_CPU_TSB_HUGE_TEMP != + offsetof(struct trap_per_cpu, tsb_huge_temp)) || + (TRAP_PER_CPU_IRQ_WORKLIST_PA != + offsetof(struct trap_per_cpu, irq_worklist_pa)) || + (TRAP_PER_CPU_CPU_MONDO_QMASK != + offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || + (TRAP_PER_CPU_DEV_MONDO_QMASK != + offsetof(struct trap_per_cpu, dev_mondo_qmask)) || + (TRAP_PER_CPU_RESUM_QMASK != + offsetof(struct trap_per_cpu, resum_qmask)) || + (TRAP_PER_CPU_NONRESUM_QMASK != + offsetof(struct trap_per_cpu, nonresum_qmask)) || + (TRAP_PER_CPU_PER_CPU_BASE != + offsetof(struct trap_per_cpu, __per_cpu_base))); + + BUILD_BUG_ON((TSB_CONFIG_TSB != + offsetof(struct tsb_config, tsb)) || + (TSB_CONFIG_RSS_LIMIT != + offsetof(struct tsb_config, tsb_rss_limit)) || + (TSB_CONFIG_NENTRIES != + offsetof(struct tsb_config, tsb_nentries)) || + (TSB_CONFIG_REG_VAL != + offsetof(struct tsb_config, tsb_reg_val)) || + (TSB_CONFIG_MAP_VADDR != + offsetof(struct tsb_config, tsb_map_vaddr)) || + (TSB_CONFIG_MAP_PTE != + offsetof(struct tsb_config, tsb_map_pte))); + + /* Attach to the address space of init_task. On SMP we + * do this in smp.c:smp_callin for other cpus. + */ + mmgrab(&init_mm); + current->active_mm = &init_mm; +} diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S new file mode 100644 index 000000000..eaed39ce8 --- /dev/null +++ b/arch/sparc/kernel/tsb.S @@ -0,0 +1,591 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* tsb.S: Sparc64 TSB table handling. + * + * Copyright (C) 2006 David S. 
Miller + */ + + +#include +#include +#include +#include +#include + + .text + .align 32 + + /* Invoked from TLB miss handler, we are in the + * MMU global registers and they are setup like + * this: + * + * %g1: TSB entry pointer + * %g2: available temporary + * %g3: FAULT_CODE_{D,I}TLB + * %g4: available temporary + * %g5: available temporary + * %g6: TAG TARGET + * %g7: available temporary, will be loaded by us with + * the physical address base of the linux page + * tables for the current address space + */ +tsb_miss_dtlb: + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g4 + srlx %g4, PAGE_SHIFT, %g4 + ba,pt %xcc, tsb_miss_page_table_walk + sllx %g4, PAGE_SHIFT, %g4 + +tsb_miss_itlb: + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_IMMU, %g4 + srlx %g4, PAGE_SHIFT, %g4 + ba,pt %xcc, tsb_miss_page_table_walk + sllx %g4, PAGE_SHIFT, %g4 + + /* At this point we have: + * %g1 -- PAGE_SIZE TSB entry address + * %g3 -- FAULT_CODE_{D,I}TLB + * %g4 -- missing virtual address + * %g6 -- TAG TARGET (vaddr >> 22) + */ +tsb_miss_page_table_walk: + TRAP_LOAD_TRAP_BLOCK(%g7, %g5) + + /* Before committing to a full page table walk, + * check the huge page TSB. + */ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + +661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5 + nop + .section .sun4v_2insn_patch, "ax" + .word 661b + mov SCRATCHPAD_UTSBREG2, %g5 + ldxa [%g5] ASI_SCRATCHPAD, %g5 + .previous + + cmp %g5, -1 + be,pt %xcc, 80f + nop + + /* We need an aligned pair of registers containing 2 values + * which can be easily rematerialized. %g6 and %g7 foot the + * bill just nicely. We'll save %g6 away into %g2 for the + * huge page TSB TAG comparison. + * + * Perform a huge page TSB lookup. + */ + mov %g6, %g2 + and %g5, 0x7, %g6 + mov 512, %g7 + andn %g5, 0x7, %g5 + sllx %g7, %g6, %g7 + srlx %g4, REAL_HPAGE_SHIFT, %g6 + sub %g7, 1, %g7 + and %g6, %g7, %g6 + sllx %g6, 4, %g6 + add %g5, %g6, %g5 + + TSB_LOAD_QUAD(%g5, %g6) + cmp %g6, %g2 + be,a,pt %xcc, tsb_tlb_reload + mov %g7, %g5 + + /* No match, remember the huge page TSB entry address, + * and restore %g6 and %g7. + */ + TRAP_LOAD_TRAP_BLOCK(%g7, %g6) + srlx %g4, 22, %g6 +80: stx %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP] + +#endif + + ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7 + + /* At this point we have: + * %g1 -- TSB entry address + * %g3 -- FAULT_CODE_{D,I}TLB + * %g4 -- missing virtual address + * %g6 -- TAG TARGET (vaddr >> 22) + * %g7 -- page table physical address + * + * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE + * TSB both lack a matching entry. + */ +tsb_miss_page_table_walk_sun4v_fastpath: + USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) + + /* Valid PTE is now in %g5. */ + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 + sllx %g7, 32, %g7 + + andcc %g5, %g7, %g0 + be,pt %xcc, 60f + nop + + /* It is a huge page, use huge page TSB entry address we + * calculated above. If the huge page TSB has not been + * allocated, setup a trap stack and call hugetlb_setup() + * to do so, then return from the trap to replay the TLB + * miss. + * + * This is necessary to handle the case of transparent huge + * pages where we don't really have a non-atomic context + * in which to allocate the hugepage TSB hash table. When + * the 'mm' faults in the hugepage for the first time, we + * thus handle it here. This also makes sure that we can + * allocate the TSB hash table on the correct NUMA node. 
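(Editorial sketch.) The huge-page TSB probe above packs both the table base and a size code into one register: the low 3 bits select the number of entries (512 << sz), and each entry is 16 bytes (tag plus TTE). A C rendering of that register arithmetic, with illustrative names, assuming exactly the layout the surrounding comments describe:

    unsigned long hugetsb_entry_addr(unsigned long tsb_reg, unsigned long vaddr,
                                     unsigned int real_hpage_shift)
    {
            unsigned long base = tsb_reg & ~0x7UL;              /* andn %g5, 0x7, %g5 */
            unsigned long nentries = 512UL << (tsb_reg & 0x7);  /* mov 512; sllx */
            unsigned long idx = (vaddr >> real_hpage_shift) & (nentries - 1);

            return base + (idx << 4);                           /* 16-byte entries */
    }
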
+ */ + TRAP_LOAD_TRAP_BLOCK(%g7, %g2) + ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1 + cmp %g1, -1 + bne,pt %xcc, 60f + nop + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + SET_GL(1) + nop + .previous + + rdpr %tl, %g7 + cmp %g7, 1 + bne,pn %xcc, winfix_trampoline + mov %g3, %g4 + ba,pt %xcc, etrap + rd %pc, %g7 + call hugetlb_setup + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +60: +#endif + + /* At this point we have: + * %g1 -- TSB entry address + * %g3 -- FAULT_CODE_{D,I}TLB + * %g5 -- valid PTE + * %g6 -- TAG TARGET (vaddr >> 22) + */ +tsb_reload: + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) + + /* Finally, load TLB and return from trap. */ +tsb_tlb_reload: + cmp %g3, FAULT_CODE_DTLB + bne,pn %xcc, tsb_itlb_load + nop + +tsb_dtlb_load: + +661: stxa %g5, [%g0] ASI_DTLB_DATA_IN + retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_DTLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_dtlb_load + mov %g5, %g3 + +tsb_itlb_load: + /* Executable bit must be set. */ +661: sethi %hi(_PAGE_EXEC_4U), %g4 + andcc %g5, %g4, %g0 + .section .sun4v_2insn_patch, "ax" + .word 661b + andcc %g5, _PAGE_EXEC_4V, %g0 + nop + .previous + + be,pn %xcc, tsb_do_fault + nop + +661: stxa %g5, [%g0] ASI_ITLB_DATA_IN + retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_ITLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_itlb_load + mov %g5, %g3 + + /* No valid entry in the page tables, do full fault + * processing. + */ + + .globl tsb_do_fault +tsb_do_fault: + cmp %g3, FAULT_CODE_DTLB + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g4 + .previous + + bne,pn %xcc, tsb_do_itlb_fault + nop + +tsb_do_dtlb_fault: + rdpr %tl, %g3 + cmp %g3, 1 + +661: mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g5 + .section .sun4v_2insn_patch, "ax" + .word 661b + ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 + nop + .previous + + /* Clear context ID bits. */ + srlx %g5, PAGE_SHIFT, %g5 + sllx %g5, PAGE_SHIFT, %g5 + + be,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB, %g4 + ba,pt %xcc, winfix_trampoline + nop + +tsb_do_itlb_fault: + rdpr %tpc, %g5 + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_ITLB, %g4 + + .globl sparc64_realfault_common +sparc64_realfault_common: + /* fault code in %g4, fault address in %g5, etrap will + * preserve these two values in %l4 and %l5 respectively + */ + ba,pt %xcc, etrap ! Save trap state +1: rd %pc, %g7 ! ... + stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code + stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address + call do_sparc64_fault ! Call fault handler + add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg + ba,pt %xcc, rtrap ! Restore cpu state + nop ! Delay slot (fill me) + +winfix_trampoline: + rdpr %tpc, %g3 ! Prepare winfixup TNPC + or %g3, 0x7c, %g3 ! 
Compute branch offset + wrpr %g3, %tnpc ! Write it into TNPC + done ! Trap return + + /* Insert an entry into the TSB. + * + * %o0: TSB entry pointer (virt or phys address) + * %o1: tag + * %o2: pte + */ + .align 32 + .globl __tsb_insert +__tsb_insert: + rdpr %pstate, %o5 + wrpr %o5, PSTATE_IE, %pstate + TSB_LOCK_TAG(%o0, %g2, %g3) + TSB_WRITE(%o0, %o2, %o1) + wrpr %o5, %pstate + retl + nop + .size __tsb_insert, .-__tsb_insert + + /* Flush the given TSB entry if it has the matching + * tag. + * + * %o0: TSB entry pointer (virt or phys address) + * %o1: tag + */ + .align 32 + .globl tsb_flush + .type tsb_flush,#function +tsb_flush: + sethi %hi(TSB_TAG_LOCK_HIGH), %g2 +1: TSB_LOAD_TAG(%o0, %g1) + srlx %g1, 32, %o3 + andcc %o3, %g2, %g0 + bne,pn %icc, 1b + nop + cmp %g1, %o1 + mov 1, %o3 + bne,pt %xcc, 2f + sllx %o3, TSB_TAG_INVALID_BIT, %o3 + TSB_CAS_TAG(%o0, %g1, %o3) + cmp %g1, %o3 + bne,pn %xcc, 1b + nop +2: retl + nop + .size tsb_flush, .-tsb_flush + + /* Reload MMU related context switch state at + * schedule() time. + * + * %o0: page table physical address + * %o1: TSB base config pointer + * %o2: TSB huge config pointer, or NULL if none + * %o3: Hypervisor TSB descriptor physical address + * %o4: Secondary context to load, if non-zero + * + * We have to run this whole thing with interrupts + * disabled so that the current cpu doesn't change + * due to preemption. + */ + .align 32 + .globl __tsb_context_switch + .type __tsb_context_switch,#function +__tsb_context_switch: + rdpr %pstate, %g1 + wrpr %g1, PSTATE_IE, %pstate + + brz,pn %o4, 1f + mov SECONDARY_CONTEXT, %o5 + +661: stxa %o4, [%o5] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %o4, [%o5] ASI_MMU + .previous + flush %g6 + +1: + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + + stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] + + ldx [%o1 + TSB_CONFIG_REG_VAL], %o0 + brz,pt %o2, 1f + mov -1, %g3 + + ldx [%o2 + TSB_CONFIG_REG_VAL], %g3 + +1: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE] + + sethi %hi(tlb_type), %g2 + lduw [%g2 + %lo(tlb_type)], %g2 + cmp %g2, 3 + bne,pt %icc, 50f + nop + + /* Hypervisor TSB switch. */ + mov SCRATCHPAD_UTSBREG1, %o5 + stxa %o0, [%o5] ASI_SCRATCHPAD + mov SCRATCHPAD_UTSBREG2, %o5 + stxa %g3, [%o5] ASI_SCRATCHPAD + + mov 2, %o0 + cmp %g3, -1 + move %xcc, 1, %o0 + + mov HV_FAST_MMU_TSB_CTXNON0, %o5 + mov %o3, %o1 + ta HV_FAST_TRAP + + /* Finish up. */ + ba,pt %xcc, 9f + nop + + /* SUN4U TSB switch. 
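(Editorial sketch.) In the hypervisor path just above, %o0 carries the number of TSB descriptions handed to the sun4v firmware: two when a huge-page TSB is configured (its register value is not -1), otherwise one. The same selection in C; hv_mmu_tsb_ctxnon0() is a hypothetical stand-in for the HV_FAST_MMU_TSB_CTXNON0 / 'ta HV_FAST_TRAP' sequence:

    unsigned long hv_mmu_tsb_ctxnon0(unsigned long ntsbs, unsigned long descr_pa);

    static unsigned long switch_user_tsbs(unsigned long huge_reg_val,
                                          unsigned long descr_pa)
    {
            /* mov 2, %o0; cmp %g3, -1; move %xcc, 1, %o0 */
            unsigned long ntsbs = (huge_reg_val == -1UL) ? 1 : 2;

            return hv_mmu_tsb_ctxnon0(ntsbs, descr_pa);
    }
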
*/ +50: mov TSB_REG, %o5 + stxa %o0, [%o5] ASI_DMMU + membar #Sync + stxa %o0, [%o5] ASI_IMMU + membar #Sync + +2: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4 + brz %o4, 9f + ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5 + + sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 + mov TLB_TAG_ACCESS, %g3 + lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 + stxa %o4, [%g3] ASI_DMMU + membar #Sync + sllx %g2, 3, %g2 + stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS + membar #Sync + + brz,pt %o2, 9f + nop + + ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4 + ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5 + mov TLB_TAG_ACCESS, %g3 + stxa %o4, [%g3] ASI_DMMU + membar #Sync + sub %g2, (1 << 3), %g2 + stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS + membar #Sync + +9: + wrpr %g1, %pstate + + retl + nop + .size __tsb_context_switch, .-__tsb_context_switch + +#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ + (1 << TSB_TAG_INVALID_BIT)) + + .align 32 + .globl copy_tsb + .type copy_tsb,#function +copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size + * %o2=new_tsb_base, %o3=new_tsb_size + * %o4=page_size_shift + */ + sethi %uhi(TSB_PASS_BITS), %g7 + srlx %o3, 4, %o3 + add %o0, %o1, %o1 /* end of old tsb */ + sllx %g7, 32, %g7 + sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ + + mov %o4, %g1 /* page_size_shift */ + +661: prefetcha [%o0] ASI_N, #one_read + .section .tsb_phys_patch, "ax" + .word 661b + prefetcha [%o0] ASI_PHYS_USE_EC, #one_read + .previous + +90: andcc %o0, (64 - 1), %g0 + bne 1f + add %o0, 64, %o5 + +661: prefetcha [%o5] ASI_N, #one_read + .section .tsb_phys_patch, "ax" + .word 661b + prefetcha [%o5] ASI_PHYS_USE_EC, #one_read + .previous + +1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ + andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ + bne,pn %xcc, 80f /* Skip it */ + sllx %g2, 22, %o4 /* TAG --> VADDR */ + + /* This can definitely be computed faster... */ + srlx %o0, 4, %o5 /* Build index */ + and %o5, 511, %o5 /* Mask index */ + sllx %o5, %g1, %o5 /* Put into vaddr position */ + or %o4, %o5, %o4 /* Full VADDR. */ + srlx %o4, %g1, %o4 /* Shift down to create index */ + and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ + sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ + TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ + add %o4, 0x8, %o4 /* Advance to TTE */ + TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ + +80: add %o0, 16, %o0 + cmp %o0, %o1 + bne,pt %xcc, 90b + nop + + retl + nop + .size copy_tsb, .-copy_tsb + + /* Set the invalid bit in all TSB entries. 
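(Editorial sketch.) copy_tsb above rehashes each live entry because the old and new tables differ in size: the virtual address is rebuilt from the stored tag (vaddr >> 22) plus the index bits implied by the entry's old position, then reduced modulo the new table size. The same computation in C, with illustrative names; pshift corresponds to the page_size_shift passed in %o4:

    unsigned long copy_tsb_new_offset(unsigned long tag, unsigned long old_off,
                                      unsigned long new_nents, unsigned int pshift)
    {
            /* sllx %g2, 22, %o4 -- the stored tag holds vaddr >> 22 */
            unsigned long vaddr = tag << 22;

            /* srlx %o0, 4, %o5; and %o5, 511, %o5; sllx %o5, %g1, %o5 */
            vaddr |= ((old_off >> 4) & 511) << pshift;

            /* srlx %o4, %g1, %o4; and %o4, %o3, %o4; sllx %o4, 4, %o4 */
            return ((vaddr >> pshift) & (new_nents - 1)) << 4;
    }
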
*/ + .align 32 + .globl tsb_init + .type tsb_init,#function +tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */ + prefetch [%o0 + 0x000], #n_writes + mov 1, %g1 + prefetch [%o0 + 0x040], #n_writes + sllx %g1, TSB_TAG_INVALID_BIT, %g1 + prefetch [%o0 + 0x080], #n_writes +1: prefetch [%o0 + 0x0c0], #n_writes + stx %g1, [%o0 + 0x00] + stx %g1, [%o0 + 0x10] + stx %g1, [%o0 + 0x20] + stx %g1, [%o0 + 0x30] + prefetch [%o0 + 0x100], #n_writes + stx %g1, [%o0 + 0x40] + stx %g1, [%o0 + 0x50] + stx %g1, [%o0 + 0x60] + stx %g1, [%o0 + 0x70] + prefetch [%o0 + 0x140], #n_writes + stx %g1, [%o0 + 0x80] + stx %g1, [%o0 + 0x90] + stx %g1, [%o0 + 0xa0] + stx %g1, [%o0 + 0xb0] + prefetch [%o0 + 0x180], #n_writes + stx %g1, [%o0 + 0xc0] + stx %g1, [%o0 + 0xd0] + stx %g1, [%o0 + 0xe0] + stx %g1, [%o0 + 0xf0] + subcc %o1, 0x100, %o1 + bne,pt %xcc, 1b + add %o0, 0x100, %o0 + retl + nop + nop + nop + .size tsb_init, .-tsb_init + + .globl NGtsb_init + .type NGtsb_init,#function +NGtsb_init: + rd %asi, %g2 + mov 1, %g1 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + sllx %g1, TSB_TAG_INVALID_BIT, %g1 +1: stxa %g1, [%o0 + 0x00] %asi + stxa %g1, [%o0 + 0x10] %asi + stxa %g1, [%o0 + 0x20] %asi + stxa %g1, [%o0 + 0x30] %asi + stxa %g1, [%o0 + 0x40] %asi + stxa %g1, [%o0 + 0x50] %asi + stxa %g1, [%o0 + 0x60] %asi + stxa %g1, [%o0 + 0x70] %asi + stxa %g1, [%o0 + 0x80] %asi + stxa %g1, [%o0 + 0x90] %asi + stxa %g1, [%o0 + 0xa0] %asi + stxa %g1, [%o0 + 0xb0] %asi + stxa %g1, [%o0 + 0xc0] %asi + stxa %g1, [%o0 + 0xd0] %asi + stxa %g1, [%o0 + 0xe0] %asi + stxa %g1, [%o0 + 0xf0] %asi + subcc %o1, 0x100, %o1 + bne,pt %xcc, 1b + add %o0, 0x100, %o0 + membar #Sync + retl + wr %g2, 0x0, %asi + .size NGtsb_init, .-NGtsb_init diff --git a/arch/sparc/kernel/ttable_32.S b/arch/sparc/kernel/ttable_32.S new file mode 100644 index 000000000..e79fd786f --- /dev/null +++ b/arch/sparc/kernel/ttable_32.S @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* The Sparc trap table, bootloader gives us control at _start. */ + __HEAD + + .globl _start +_start: + + .globl _stext +_stext: + + .globl trapbase +trapbase: + +#ifdef CONFIG_SMP +trapbase_cpu0: +#endif +/* We get control passed to us here at t_zero. */ +t_zero: b gokernel; nop; nop; nop; +t_tflt: SRMMU_TFAULT /* Inst. 
Access Exception */ +t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */ +t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */ +t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */ +t_wovf: WINDOW_SPILL /* Window Overflow */ +t_wunf: WINDOW_FILL /* Window Underflow */ +t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */ +t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */ +t_dflt: SRMMU_DFAULT /* Data Miss Exception */ +t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */ +t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */ +t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) +t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */ +t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */ +t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */ +t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */ +t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */ +t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */ +t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */ +t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */ +t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */ +t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */ +t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */ +t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */ +t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */ +t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */ + + .globl t_nmi +t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + +t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */ +t_iacce:BAD_TRAP(0x21) /* Instr Access Error */ +t_bad22:BAD_TRAP(0x22) + BAD_TRAP(0x23) +t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */ +t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */ +t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27) +t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */ +t_dacce:SRMMU_DFAULT /* Data Access Error */ +t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... 
*/ +t_dserr:BAD_TRAP(0x2b) /* Data Store Error */ +t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */ +t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) +t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) +t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) +t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */ +t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41) +t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46) +t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b) +t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50) +t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) +t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) +t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) +t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) +t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) +t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) +t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) +t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) +t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) +t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f) +t_bad80:BAD_TRAP(0x80) /* SunOS System Call */ +t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */ +t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */ +t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */ +t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */ +t_rchk: BAD_TRAP(0x85) /* Range Check */ +t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */ +t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */ +t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */ +t_bad89:BAD_TRAP(0x89) /* Net-B.S. 
System Call */ +t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e) +t_bad8f:BAD_TRAP(0x8f) +t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */ +t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) +t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) +t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f) +t_getcc:GETCC_TRAP /* Get Condition Codes */ +t_setcc:SETCC_TRAP /* Set Condition Codes */ +t_getpsr:GETPSR_TRAP /* Get PSR Register */ +t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) +t_bada7:BAD_TRAP(0xa7) +t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) +t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) +t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) +t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) +t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) +t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) +t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) +t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) +t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) +t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) +t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) +t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) +t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) +t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) +t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) +t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) +t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) +t_badfc:BAD_TRAP(0xfc) +t_kgdb: KGDB_TRAP(0xfd) +dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */ +dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */ + + .globl end_traptable +end_traptable: + +#ifdef CONFIG_SMP + /* Trap tables for the other cpus. 
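(Editorial sketch.) Every vector in these tables is exactly four instructions (16 bytes), and the hardware enters the table at trapbase + (tt << 4), where tt is the trap type latched in %tbr; that is why each macro above must expand to precisely one 16-byte slot. A small illustration of the addressing, with hypothetical helper names:

    #include <stdint.h>

    #define TRAP_ENTRY_BYTES 16     /* four 4-byte instructions per vector */

    static inline uintptr_t trap_vector_addr(uintptr_t trapbase, unsigned int tt)
    {
            return trapbase + (uintptr_t)tt * TRAP_ENTRY_BYTES;  /* tt is 0x00..0xff */
    }
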
*/ + .globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3 +trapbase_cpu1: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) + BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) BAD_TRAP(0x85) BAD_TRAP(0x86) + BAD_TRAP(0x87) BAD_TRAP(0x88) BAD_TRAP(0x89) + BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP BAD_TRAP(0x91) + BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) 
BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +trapbase_cpu2: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) + TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) + TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) + TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) + TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) + TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) + TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) + TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) + BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) + BAD_TRAP(0x85) + BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) + BAD_TRAP(0x89) BAD_TRAP(0x8a) 
BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP BAD_TRAP(0x91) + BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +trapbase_cpu3: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) + TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) + TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) + TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) + TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) + TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) + TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) + TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) 
BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) BAD_TRAP(0x85) + BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) + BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP + BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +#endif diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S new file mode 100644 index 000000000..86e737e59 --- /dev/null +++ b/arch/sparc/kernel/ttable_64.S @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions. + * + * Copyright (C) 1996, 2001, 2006 David S. 
Miller (davem@davemloft.net) + */ + + + .globl sparc64_ttable_tl0, sparc64_ttable_tl1 + .globl tl0_icpe, tl1_icpe + .globl tl0_dcpe, tl1_dcpe + .globl tl0_fecc, tl1_fecc + .globl tl0_cee, tl1_cee + .globl tl0_iae, tl1_iae + .globl tl0_dae, tl1_dae + +sparc64_ttable_tl0: +tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) +tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) +tl0_iax: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) +tl0_itsb_4v: SUN4V_ITSB_MISS +tl0_iae: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_access_error) +tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) +tl0_ill: membar #Sync + TRAP_7INSNS(do_illegal_instruction) +tl0_privop: TRAP(do_privop) +tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17) +tl0_resv018: BTRAP(0x18) BTRAP(0x19) +tl0_mcd: SUN4V_MCD_PRECISE +tl0_resv01b: BTRAP(0x1b) +tl0_resv01c: BTRAP(0x1c) BTRAP(0x1d) BTRAP(0x1e) BTRAP(0x1f) +tl0_fpdis: TRAP_NOSAVE(do_fpdis) +tl0_fpieee: TRAP_SAVEFPU(do_fpieee) +tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos) +tl0_tof: TRAP(do_tof) +tl0_cwin: CLEAN_WINDOW +tl0_div0: TRAP(do_div0) +tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) +tl0_resv02f: BTRAP(0x2f) +tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) +tl0_dtsb_4v: SUN4V_DTSB_MISS +tl0_dae: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_access_error) +tl0_resv033: BTRAP(0x33) +tl0_mna: TRAP_NOSAVE(do_mna) +tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) +tl0_stdfmna: TRAP_NOSAVE(do_stdfmna) +tl0_privact: TRAP_NOSAVE(__do_privact) +tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d) +tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) +#ifdef CONFIG_SMP +tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) +tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) +tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) +tl0_irq4: BTRAP(0x44) +#else +tl0_irq1: BTRAP(0x41) +tl0_irq2: BTRAP(0x42) +tl0_irq3: BTRAP(0x43) +tl0_irq4: BTRAP(0x44) +#endif +tl0_irq5: TRAP_IRQ(handler_irq, 5) +#ifdef CONFIG_SMP +tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6) +#else +tl0_irq6: BTRAP(0x46) +#endif +tl0_irq7: TRAP_IRQ(deferred_pcr_work_irq, 7) +#if defined(CONFIG_KGDB) && defined(CONFIG_SMP) +tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8) +#else +tl0_irq8: BTRAP(0x48) +#endif +tl0_irq9: BTRAP(0x49) +tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) +tl0_irq14: TRAP_IRQ(timer_interrupt, 14) +tl0_irq15: TRAP_NMI_IRQ(perfctr_irq, 15) +tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) +tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) +tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f) +tl0_ivec: TRAP_IVEC +tl0_paw: TRAP(do_paw) +tl0_vaw: TRAP(do_vaw) +tl0_cee: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) +tl0_iamiss: +#include "itlb_miss.S" +tl0_damiss: +#include "dtlb_miss.S" +tl0_daprot: +#include "dtlb_prot.S" +tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ +tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ +tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ +tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) +tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) +tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) +tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) +tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) +tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) +tl0_s0n: SPILL_0_NORMAL +tl0_s1n: SPILL_1_NORMAL +tl0_s2n: 
SPILL_2_NORMAL +tl0_s3n: SPILL_0_NORMAL_ETRAP +tl0_s4n: SPILL_1_GENERIC_ETRAP +tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP +tl0_s6n: SPILL_2_GENERIC_ETRAP +tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP +tl0_s0o: SPILL_0_OTHER +tl0_s1o: SPILL_1_OTHER +tl0_s2o: SPILL_2_OTHER +tl0_s3o: SPILL_3_OTHER +tl0_s4o: SPILL_4_OTHER +tl0_s5o: SPILL_5_OTHER +tl0_s6o: SPILL_6_OTHER +tl0_s7o: SPILL_7_OTHER +tl0_f0n: FILL_0_NORMAL +tl0_f1n: FILL_1_NORMAL +tl0_f2n: FILL_2_NORMAL +tl0_f3n: FILL_3_NORMAL +tl0_f4n: FILL_4_NORMAL +tl0_f5n: FILL_0_NORMAL_RTRAP +tl0_f6n: FILL_1_GENERIC_RTRAP +tl0_f7n: FILL_2_GENERIC_RTRAP +tl0_f0o: FILL_0_OTHER +tl0_f1o: FILL_1_OTHER +tl0_f2o: FILL_2_OTHER +tl0_f3o: FILL_3_OTHER +tl0_f4o: FILL_4_OTHER +tl0_f5o: FILL_5_OTHER +tl0_f6o: FILL_6_OTHER +tl0_f7o: FILL_7_OTHER +tl0_resv100: BTRAP(0x100) +tl0_bkpt: BREAKPOINT_TRAP +tl0_divz: TRAP(do_div0) +tl0_flushw: FLUSH_WINDOW_TRAP +tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) BTRAP(0x108) +tl0_resv109: BTRAP(0x109) BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) +tl0_resv10e: BTRAP(0x10e) BTRAP(0x10f) +tl0_linux32: LINUX_32BIT_SYSCALL_TRAP +tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP +tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113) +tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115) +tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117) +tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119) +tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b) +tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d) +tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f) +tl0_getcc: GETCC_TRAP +tl0_setcc: SETCC_TRAP +tl0_getpsr: TRAP(do_getpsr) +tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126) BTRAP(0x127) +tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c) +tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131) +tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136) +tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b) +tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140) +tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145) +tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a) +tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f) +tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154) +tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159) +tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e) +tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163) +tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168) +tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c) +tl0_linux64: LINUX_64BIT_SYSCALL_TRAP +tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context) +tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172) +tl0_resv173: UPROBES_TRAP(0x173) UPROBES_TRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177) +tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c) +tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f) +#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) 
BTRAP(x+5) BTRAP(x+6) BTRAP(x+7) +tl0_resv180: BTRAPS(0x180) BTRAPS(0x188) +tl0_resv190: BTRAPS(0x190) BTRAPS(0x198) +tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8) +tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8) +tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8) +tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8) +tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8) +tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8) + +sparc64_ttable_tl1: +tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) +tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) +tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) +tl1_itsb_4v: SUN4V_ITSB_MISS +tl1_iae: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_access_error) +tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) +tl1_ill: TRAPTL1(do_ill_tl1) +tl1_privop: BTRAPTL1(0x11) +tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15) +tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19) +tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d) +tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f) +tl1_fpdis: TRAP_NOSAVE(do_fpdis) +tl1_fpieee: TRAPTL1(do_fpieee_tl1) +tl1_fpother: TRAPTL1(do_fpother_tl1) +tl1_tof: TRAPTL1(do_tof_tl1) +tl1_cwin: CLEAN_WINDOW +tl1_div0: TRAPTL1(do_div0_tl1) +tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) +tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) +tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) +tl1_dtsb_4v: SUN4V_DTSB_MISS +tl1_dae: membar #Sync + TRAP_NOSAVE_7INSNS(__spitfire_access_error) +tl1_resv033: BTRAPTL1(0x33) +tl1_mna: TRAP_NOSAVE(do_mna) +tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) +tl1_stdfmna: TRAPTL1(do_stdfmna_tl1) +tl1_privact: BTRAPTL1(0x37) +tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b) +tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f) +tl1_resv040: BTRAPTL1(0x40) +tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3) +tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6) +tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9) +tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11) +tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13) +tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15) +tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53) +tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57) +tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b) +tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f) +tl1_ivec: TRAP_IVEC +tl1_paw: TRAPTL1(do_paw_tl1) +tl1_vaw: TRAPTL1(do_vaw_tl1) +tl1_cee: BTRAPTL1(0x63) +tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) +tl1_damiss: +#include "dtlb_miss.S" +tl1_daprot: +#include "dtlb_prot.S" +tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ +tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */ +tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */ +tl1_resv073: BTRAPTL1(0x73) +tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77) +tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b) +tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f) +tl1_s0n: SPILL_0_NORMAL +tl1_s1n: SPILL_1_NORMAL +tl1_s2n: SPILL_2_NORMAL +tl1_s3n: SPILL_3_NORMAL +tl1_s4n: SPILL_4_NORMAL +tl1_s5n: SPILL_5_NORMAL +tl1_s6n: SPILL_6_NORMAL +tl1_s7n: SPILL_7_NORMAL +tl1_s0o: 
SPILL_0_OTHER +tl1_s1o: SPILL_1_OTHER +tl1_s2o: SPILL_2_OTHER +tl1_s3o: SPILL_3_OTHER +tl1_s4o: SPILL_4_OTHER +tl1_s5o: SPILL_5_OTHER +tl1_s6o: SPILL_6_OTHER +tl1_s7o: SPILL_7_OTHER +tl1_f0n: FILL_0_NORMAL +tl1_f1n: FILL_1_NORMAL +tl1_f2n: FILL_2_NORMAL +tl1_f3n: FILL_3_NORMAL +tl1_f4n: FILL_4_NORMAL +tl1_f5n: FILL_5_NORMAL +tl1_f6n: FILL_6_NORMAL +tl1_f7n: FILL_7_NORMAL +tl1_f0o: FILL_0_OTHER +tl1_f1o: FILL_1_OTHER +tl1_f2o: FILL_2_OTHER +tl1_f3o: FILL_3_OTHER +tl1_f4o: FILL_4_OTHER +tl1_f5o: FILL_5_OTHER +tl1_f6o: FILL_6_OTHER +tl1_f7o: FILL_7_OTHER diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S new file mode 100644 index 000000000..f8bf83928 --- /dev/null +++ b/arch/sparc/kernel/una_asm_32.S @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* una_asm.S: Kernel unaligned trap assembler helpers. + * + * Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include + + .text + +retl_efault: + retl + mov -EFAULT, %o0 + + /* int __do_int_store(unsigned long *dst_addr, int size, + * unsigned long *src_val) + * + * %o0 = dest_addr + * %o1 = size + * %o2 = src_val + * + * Return '0' on success, -EFAULT on failure. + */ + .globl __do_int_store +__do_int_store: + ld [%o2], %g1 + cmp %o1, 2 + be 2f + cmp %o1, 4 + be 1f + srl %g1, 24, %g2 + srl %g1, 16, %g7 +4: stb %g2, [%o0] + srl %g1, 8, %g2 +5: stb %g7, [%o0 + 1] + ld [%o2 + 4], %g7 +6: stb %g2, [%o0 + 2] + srl %g7, 24, %g2 +7: stb %g1, [%o0 + 3] + srl %g7, 16, %g1 +8: stb %g2, [%o0 + 4] + srl %g7, 8, %g2 +9: stb %g1, [%o0 + 5] +10: stb %g2, [%o0 + 6] + b 0f +11: stb %g7, [%o0 + 7] +1: srl %g1, 16, %g7 +12: stb %g2, [%o0] + srl %g1, 8, %g2 +13: stb %g7, [%o0 + 1] +14: stb %g2, [%o0 + 2] + b 0f +15: stb %g1, [%o0 + 3] +2: srl %g1, 8, %g2 +16: stb %g2, [%o0] +17: stb %g1, [%o0 + 1] +0: retl + mov 0, %o0 + + .section __ex_table,#alloc + .word 4b, retl_efault + .word 5b, retl_efault + .word 6b, retl_efault + .word 7b, retl_efault + .word 8b, retl_efault + .word 9b, retl_efault + .word 10b, retl_efault + .word 11b, retl_efault + .word 12b, retl_efault + .word 13b, retl_efault + .word 14b, retl_efault + .word 15b, retl_efault + .word 16b, retl_efault + .word 17b, retl_efault + .previous + + /* int do_int_load(unsigned long *dest_reg, int size, + * unsigned long *saddr, int is_signed) + * + * %o0 = dest_reg + * %o1 = size + * %o2 = saddr + * %o3 = is_signed + * + * Return '0' on success, -EFAULT on failure. 
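+ *
+ * For illustration only, a C-level sketch of the call as made by
+ * kernel_unaligned_trap() in unaligned_32.c (names as in that file):
+ *
+ *	unsigned long *rp = fetch_reg_addr((insn >> 25) & 0x1f, regs);
+ *	int err = do_int_load(rp, size, (unsigned long *) addr,
+ *			      decode_signedness(insn));
+ *	if (err)	/* -EFAULT: take the exception-table fixup */
+ *		kernel_mna_trap_fault(regs, insn);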
+ */ + .globl do_int_load +do_int_load: + cmp %o1, 8 + be 9f + cmp %o1, 4 + be 6f +4: ldub [%o2], %g1 +5: ldub [%o2 + 1], %g2 + sll %g1, 8, %g1 + tst %o3 + be 3f + or %g1, %g2, %g1 + sll %g1, 16, %g1 + sra %g1, 16, %g1 +3: b 0f + st %g1, [%o0] +6: ldub [%o2 + 1], %g2 + sll %g1, 24, %g1 +7: ldub [%o2 + 2], %g7 + sll %g2, 16, %g2 +8: ldub [%o2 + 3], %g3 + sll %g7, 8, %g7 + or %g3, %g2, %g3 + or %g7, %g3, %g7 + or %g1, %g7, %g1 + b 0f + st %g1, [%o0] +9: ldub [%o2], %g1 +10: ldub [%o2 + 1], %g2 + sll %g1, 24, %g1 +11: ldub [%o2 + 2], %g7 + sll %g2, 16, %g2 +12: ldub [%o2 + 3], %g3 + sll %g7, 8, %g7 + or %g1, %g2, %g1 + or %g7, %g3, %g7 + or %g1, %g7, %g7 +13: ldub [%o2 + 4], %g1 + st %g7, [%o0] +14: ldub [%o2 + 5], %g2 + sll %g1, 24, %g1 +15: ldub [%o2 + 6], %g7 + sll %g2, 16, %g2 +16: ldub [%o2 + 7], %g3 + sll %g7, 8, %g7 + or %g1, %g2, %g1 + or %g7, %g3, %g7 + or %g1, %g7, %g7 + st %g7, [%o0 + 4] +0: retl + mov 0, %o0 + + .section __ex_table,#alloc + .word 4b, retl_efault + .word 5b, retl_efault + .word 6b, retl_efault + .word 7b, retl_efault + .word 8b, retl_efault + .word 9b, retl_efault + .word 10b, retl_efault + .word 11b, retl_efault + .word 12b, retl_efault + .word 13b, retl_efault + .word 14b, retl_efault + .word 15b, retl_efault + .word 16b, retl_efault + .previous diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S new file mode 100644 index 000000000..e256f395e --- /dev/null +++ b/arch/sparc/kernel/una_asm_64.S @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* una_asm.S: Kernel unaligned trap assembler helpers. + * + * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + + .text + + .globl __do_int_store +__do_int_store: + rd %asi, %o4 + wr %o3, 0, %asi + mov %o2, %g3 + cmp %o1, 2 + be,pn %icc, 2f + cmp %o1, 4 + be,pt %icc, 1f + srlx %g3, 24, %g2 + srlx %g3, 56, %g1 + srlx %g3, 48, %g7 +4: stba %g1, [%o0] %asi + srlx %g3, 40, %g1 +5: stba %g7, [%o0 + 1] %asi + srlx %g3, 32, %g7 +6: stba %g1, [%o0 + 2] %asi +7: stba %g7, [%o0 + 3] %asi + srlx %g3, 16, %g1 +8: stba %g2, [%o0 + 4] %asi + srlx %g3, 8, %g7 +9: stba %g1, [%o0 + 5] %asi +10: stba %g7, [%o0 + 6] %asi + ba,pt %xcc, 0f +11: stba %g3, [%o0 + 7] %asi +1: srl %g3, 16, %g7 +12: stba %g2, [%o0] %asi + srl %g3, 8, %g2 +13: stba %g7, [%o0 + 1] %asi +14: stba %g2, [%o0 + 2] %asi + ba,pt %xcc, 0f +15: stba %g3, [%o0 + 3] %asi +2: srl %g3, 8, %g2 +16: stba %g2, [%o0] %asi +17: stba %g3, [%o0 + 1] %asi +0: + wr %o4, 0x0, %asi + retl + mov 0, %o0 + .size __do_int_store, .-__do_int_store + + .section __ex_table,"a" + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault + .word 17b, __retl_efault + .previous + + .globl do_int_load +do_int_load: + rd %asi, %o5 + wr %o4, 0, %asi + cmp %o1, 8 + bge,pn %icc, 9f + cmp %o1, 4 + be,pt %icc, 6f +4: lduba [%o2] %asi, %g2 +5: lduba [%o2 + 1] %asi, %g3 + sll %g2, 8, %g2 + brz,pt %o3, 3f + add %g2, %g3, %g2 + sllx %g2, 48, %g2 + srax %g2, 48, %g2 +3: ba,pt %xcc, 0f + stx %g2, [%o0] +6: lduba [%o2 + 1] %asi, %g3 + sll %g2, 24, %g2 +7: lduba [%o2 + 2] %asi, %g7 + sll %g3, 16, %g3 +8: lduba [%o2 + 3] %asi, %g1 + sll %g7, 8, %g7 + or %g2, %g3, %g2 + or %g7, %g1, %g7 + or %g2, %g7, %g2 + brnz,a,pt %o3, 
3f + sra %g2, 0, %g2 +3: ba,pt %xcc, 0f + stx %g2, [%o0] +9: lduba [%o2] %asi, %g2 +10: lduba [%o2 + 1] %asi, %g3 + sllx %g2, 56, %g2 +11: lduba [%o2 + 2] %asi, %g7 + sllx %g3, 48, %g3 +12: lduba [%o2 + 3] %asi, %g1 + sllx %g7, 40, %g7 + sllx %g1, 32, %g1 + or %g2, %g3, %g2 + or %g7, %g1, %g7 +13: lduba [%o2 + 4] %asi, %g3 + or %g2, %g7, %g7 +14: lduba [%o2 + 5] %asi, %g1 + sllx %g3, 24, %g3 +15: lduba [%o2 + 6] %asi, %g2 + sllx %g1, 16, %g1 + or %g7, %g3, %g7 +16: lduba [%o2 + 7] %asi, %g3 + sllx %g2, 8, %g2 + or %g7, %g1, %g7 + or %g2, %g3, %g2 + or %g7, %g2, %g7 + cmp %o1, 8 + be,a,pt %icc, 0f + stx %g7, [%o0] + srlx %g7, 32, %g2 + sra %g7, 0, %g7 + stx %g2, [%o0] + stx %g7, [%o0 + 8] +0: + wr %o5, 0x0, %asi + retl + mov 0, %o0 + .size do_int_load, .-do_int_load + + .section __ex_table,"a" + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault + .previous diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c new file mode 100644 index 000000000..455f0258c --- /dev/null +++ b/arch/sparc/kernel/unaligned_32.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * unaligned.c: Unaligned load/store trap handling with special + * cases for the kernel to do them more quickly. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "kernel.h" + +enum direction { + load, /* ld, ldd, ldh, ldsh */ + store, /* st, std, sth, stsh */ + both, /* Swap, ldstub, etc. */ + fpload, + fpstore, + invalid, +}; + +static inline enum direction decode_direction(unsigned int insn) +{ + unsigned long tmp = (insn >> 21) & 1; + + if(!tmp) + return load; + else { + if(((insn>>19)&0x3f) == 15) + return both; + else + return store; + } +} + +/* 8 = double-word, 4 = word, 2 = half-word */ +static inline int decode_access_size(unsigned int insn) +{ + insn = (insn >> 19) & 3; + + if(!insn) + return 4; + else if(insn == 3) + return 8; + else if(insn == 2) + return 2; + else { + printk("Impossible unaligned trap. insn=%08x\n", insn); + die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs); + return 4; /* just to keep gcc happy. */ + } +} + +/* 0x400000 = signed, 0 = unsigned */ +static inline int decode_signedness(unsigned int insn) +{ + return (insn & 0x400000); +} + +static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, + unsigned int rd) +{ + if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { + /* Wheee... */ + __asm__ __volatile__("save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "save %sp, -0x40, %sp\n\t" + "restore; restore; restore; restore;\n\t" + "restore; restore; restore;\n\t"); + } +} + +static inline int sign_extend_imm13(int imm) +{ + return imm << 19 >> 19; +} + +static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) +{ + struct reg_window32 *win; + + if(reg < 16) + return (!reg ? 0 : regs->u_regs[reg]); + + /* Ho hum, the slightly complicated case. 
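+ * Window registers %l0-%l7 and %i0-%i7 (regs 16-31) live in the
+ * register window saved on the stack at %fp; indexing
+ * win->locals[reg - 16] deliberately runs past locals[] into the
+ * adjacent ins[] array for regs 24-31.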
*/
+	win = (struct reg_window32 *) regs->u_regs[UREG_FP];
+	return win->locals[reg - 16]; /* yes, I know what this does... */
+}
+
+static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
+{
+	struct reg_window32 __user *win;
+	unsigned long ret;
+
+	if (reg < 16)
+		return (!reg ? 0 : regs->u_regs[reg]);
+
+	/* Ho hum, the slightly complicated case. */
+	win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
+
+	if ((unsigned long)win & 3)
+		return -1;
+
+	if (get_user(ret, &win->locals[reg - 16]))
+		return -1;
+
+	return ret;
+}
+
+static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+	struct reg_window32 *win;
+
+	if(reg < 16)
+		return &regs->u_regs[reg];
+	win = (struct reg_window32 *) regs->u_regs[UREG_FP];
+	return &win->locals[reg - 16];
+}
+
+static unsigned long compute_effective_address(struct pt_regs *regs,
+					       unsigned int insn)
+{
+	unsigned int rs1 = (insn >> 14) & 0x1f;
+	unsigned int rs2 = insn & 0x1f;
+	unsigned int rd = (insn >> 25) & 0x1f;
+
+	if(insn & 0x2000) {
+		maybe_flush_windows(rs1, 0, rd);
+		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+	} else {
+		maybe_flush_windows(rs1, rs2, rd);
+		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+	}
+}
+
+unsigned long safe_compute_effective_address(struct pt_regs *regs,
+					     unsigned int insn)
+{
+	unsigned int rs1 = (insn >> 14) & 0x1f;
+	unsigned int rs2 = insn & 0x1f;
+	unsigned int rd = (insn >> 25) & 0x1f;
+
+	if(insn & 0x2000) {
+		maybe_flush_windows(rs1, 0, rd);
+		return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+	} else {
+		maybe_flush_windows(rs1, rs2, rd);
+		return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
+	}
+}
+
+/* This is just to make gcc think panic does return... */
+static void unaligned_panic(char *str)
+{
+	panic("%s", str);
+}
+
+/* una_asm.S */
+extern int do_int_load(unsigned long *dest_reg, int size,
+		       unsigned long *saddr, int is_signed);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+			  unsigned long *src_val);
+
+static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+			struct pt_regs *regs)
+{
+	unsigned long zero[2] = { 0, 0 };
+	unsigned long *src_val;
+
+	if (reg_num)
+		src_val = fetch_reg_addr(reg_num, regs);
+	else {
+		src_val = &zero[0];
+		if (size == 8)
+			zero[1] = fetch_reg(1, regs);
+	}
+	return __do_int_store(dst_addr, size, src_val);
+}
+
+extern void smp_capture(void);
+extern void smp_release(void);
+
+static inline void advance(struct pt_regs *regs)
+{
+	regs->pc = regs->npc;
+	regs->npc += 4;
+}
+
+static inline int floating_point_load_or_store_p(unsigned int insn)
+{
+	return (insn >> 24) & 1;
+}
+
+static inline int ok_for_kernel(unsigned int insn)
+{
+	return !floating_point_load_or_store_p(insn);
+}
+
+static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+{
+	const struct exception_table_entry *entry;
+
+	entry = search_exception_tables(regs->pc);
+	if (!entry) {
+		unsigned long address = compute_effective_address(regs, insn);
+		if(address < PAGE_SIZE) {
+			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+		} else
+			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+		printk(KERN_ALERT " at virtual address %08lx\n",address);
+		printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
+			(current->mm ? current->mm->context :
+			current->active_mm->context));
+		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
+			(current->mm ?
(unsigned long) current->mm->pgd : + (unsigned long) current->active_mm->pgd)); + die_if_kernel("Oops", regs); + /* Not reached */ + } + regs->pc = entry->fixup; + regs->npc = regs->pc + 4; +} + +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) +{ + enum direction dir = decode_direction(insn); + int size = decode_access_size(insn); + + if(!ok_for_kernel(insn) || dir == both) { + printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", + regs->pc); + unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); + } else { + unsigned long addr = compute_effective_address(regs, insn); + int err; + + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + switch (dir) { + case load: + err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), + regs), + size, (unsigned long *) addr, + decode_signedness(insn)); + break; + + case store: + err = do_int_store(((insn>>25)&0x1f), size, + (unsigned long *) addr, regs); + break; + default: + panic("Impossible kernel unaligned trap."); + /* Not reached... */ + } + if (err) + kernel_mna_trap_fault(regs, insn); + else + advance(regs); + } +} + +asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) +{ + send_sig_fault(SIGBUS, BUS_ADRALN, + (void __user *)safe_compute_effective_address(regs, insn), + current); +} diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c new file mode 100644 index 000000000..23db2efda --- /dev/null +++ b/arch/sparc/kernel/unaligned_64.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * unaligned.c: Unaligned load/store trap handling with special + * cases for the kernel to do them more quickly. + * + * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "entry.h" +#include "kernel.h" + +enum direction { + load, /* ld, ldd, ldh, ldsh */ + store, /* st, std, sth, stsh */ + both, /* Swap, ldstub, cas, ... */ + fpld, + fpst, + invalid, +}; + +static inline enum direction decode_direction(unsigned int insn) +{ + unsigned long tmp = (insn >> 21) & 1; + + if (!tmp) + return load; + else { + switch ((insn>>19)&0xf) { + case 15: /* swap* */ + return both; + default: + return store; + } + } +} + +/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ +static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) +{ + unsigned int tmp; + + tmp = ((insn >> 19) & 0xf); + if (tmp == 11 || tmp == 14) /* ldx/stx */ + return 8; + tmp &= 3; + if (!tmp) + return 4; + else if (tmp == 3) + return 16; /* ldd/std - Although it is actually 8 */ + else if (tmp == 2) + return 2; + else { + printk("Impossible unaligned trap. insn=%08x\n", insn); + die_if_kernel("Byte sized unaligned access?!?!", regs); + + /* GCC should never warn that control reaches the end + * of this function without returning a value because + * die_if_kernel() is marked with attribute 'noreturn'. + * Alas, some versions do... 
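+ *
+ * For reference, worked examples of the size decode above (tmp is
+ * insn bits 22:19): ldx/stx hit the 0xb/0xe check and give 8; lduw
+ * has tmp & 3 == 0 -> 4; lduh has tmp & 3 == 2 -> 2; ldd/std have
+ * tmp & 3 == 3 -> 16, the marker used here for ldd/std even though
+ * they actually move 8 bytes.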
+ */
+
+		return 0;
+	}
+}
+
+static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
+{
+	if (insn & 0x800000) {
+		if (insn & 0x2000)
+			return (unsigned char)(regs->tstate >> 24);	/* %asi */
+		else
+			return (unsigned char)(insn >> 5);	/* imm_asi */
+	} else
+		return ASI_P;
+}
+
+/* 0x400000 = signed, 0 = unsigned */
+static inline int decode_signedness(unsigned int insn)
+{
+	return (insn & 0x400000);
+}
+
+static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
+					unsigned int rd, int from_kernel)
+{
+	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
+		if (from_kernel != 0)
+			__asm__ __volatile__("flushw");
+		else
+			flushw_user();
+	}
+}
+
+static inline long sign_extend_imm13(long imm)
+{
+	return imm << 51 >> 51;
+}
+
+static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
+{
+	unsigned long value, fp;
+
+	if (reg < 16)
+		return (!reg ? 0 : regs->u_regs[reg]);
+
+	fp = regs->u_regs[UREG_FP];
+
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *win;
+		win = (struct reg_window *)(fp + STACK_BIAS);
+		value = win->locals[reg - 16];
+	} else if (!test_thread_64bit_stack(fp)) {
+		struct reg_window32 __user *win32;
+		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
+		get_user(value, &win32->locals[reg - 16]);
+	} else {
+		struct reg_window __user *win;
+		win = (struct reg_window __user *)(fp + STACK_BIAS);
+		get_user(value, &win->locals[reg - 16]);
+	}
+	return value;
+}
+
+static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+	unsigned long fp;
+
+	if (reg < 16)
+		return &regs->u_regs[reg];
+
+	fp = regs->u_regs[UREG_FP];
+
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *win;
+		win = (struct reg_window *)(fp + STACK_BIAS);
+		return &win->locals[reg - 16];
+	} else if (!test_thread_64bit_stack(fp)) {
+		struct reg_window32 *win32;
+		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
+		return (unsigned long *)&win32->locals[reg - 16];
+	} else {
+		struct reg_window *win;
+		win = (struct reg_window *)(fp + STACK_BIAS);
+		return &win->locals[reg - 16];
+	}
+}
+
+unsigned long compute_effective_address(struct pt_regs *regs,
+					unsigned int insn, unsigned int rd)
+{
+	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+	unsigned int rs1 = (insn >> 14) & 0x1f;
+	unsigned int rs2 = insn & 0x1f;
+	unsigned long addr;
+
+	if (insn & 0x2000) {
+		maybe_flush_windows(rs1, 0, rd, from_kernel);
+		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+	} else {
+		maybe_flush_windows(rs1, rs2, rd, from_kernel);
+		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+	}
+
+	if (!from_kernel && test_thread_flag(TIF_32BIT))
+		addr &= 0xffffffff;
+
+	return addr;
+}
+
+/* This is just to make gcc think die_if_kernel does return... */
+static void __used unaligned_panic(char *str, struct pt_regs *regs)
+{
+	die_if_kernel(str, regs);
+}
+
+extern int do_int_load(unsigned long *dest_reg, int size,
+		       unsigned long *saddr, int is_signed, int asi);
+
+extern int __do_int_store(unsigned long *dst_addr, int size,
+			  unsigned long src_val, int asi);
+
+static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+			       struct pt_regs *regs, int asi, int orig_asi)
+{
+	unsigned long zero = 0;
+	unsigned long *src_val_p = &zero;
+	unsigned long src_val;
+
+	if (size == 16) {
+		size = 8;
+		zero = (((long)(reg_num ?
+ (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) | + (unsigned int)fetch_reg(reg_num + 1, regs); + } else if (reg_num) { + src_val_p = fetch_reg_addr(reg_num, regs); + } + src_val = *src_val_p; + if (unlikely(asi != orig_asi)) { + switch (size) { + case 2: + src_val = swab16(src_val); + break; + case 4: + src_val = swab32(src_val); + break; + case 8: + src_val = swab64(src_val); + break; + case 16: + default: + BUG(); + break; + } + } + return __do_int_store(dst_addr, size, src_val, asi); +} + +static inline void advance(struct pt_regs *regs) +{ + regs->tpc = regs->tnpc; + regs->tnpc += 4; + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } +} + +static inline int floating_point_load_or_store_p(unsigned int insn) +{ + return (insn >> 24) & 1; +} + +static inline int ok_for_kernel(unsigned int insn) +{ + return !floating_point_load_or_store_p(insn); +} + +static void kernel_mna_trap_fault(int fixup_tstate_asi) +{ + struct pt_regs *regs = current_thread_info()->kern_una_regs; + unsigned int insn = current_thread_info()->kern_una_insn; + const struct exception_table_entry *entry; + + entry = search_exception_tables(regs->tpc); + if (!entry) { + unsigned long address; + + address = compute_effective_address(regs, insn, + ((insn >> 25) & 0x1f)); + if (address < PAGE_SIZE) { + printk(KERN_ALERT "Unable to handle kernel NULL " + "pointer dereference in mna handler"); + } else + printk(KERN_ALERT "Unable to handle kernel paging " + "request in mna handler"); + printk(KERN_ALERT " at virtual address %016lx\n",address); + printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n", + (current->mm ? CTX_HWBITS(current->mm->context) : + CTX_HWBITS(current->active_mm->context))); + printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n", + (current->mm ? (unsigned long) current->mm->pgd : + (unsigned long) current->active_mm->pgd)); + die_if_kernel("Oops", regs); + /* Not reached */ + } + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + + if (fixup_tstate_asi) { + regs->tstate &= ~TSTATE_ASI; + regs->tstate |= (ASI_AIUS << 24UL); + } +} + +static void log_unaligned(struct pt_regs *regs) +{ + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); + + if (__ratelimit(&ratelimit)) { + printk("Kernel unaligned access at TPC[%lx] %pS\n", + regs->tpc, (void *) regs->tpc); + } +} + +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) +{ + enum direction dir = decode_direction(insn); + int size = decode_access_size(regs, insn); + int orig_asi, asi; + + current_thread_info()->kern_una_regs = regs; + current_thread_info()->kern_una_insn = insn; + + orig_asi = asi = decode_asi(insn, regs); + + /* If this is a {get,put}_user() on an unaligned userspace pointer, + * just signal a fault and do not log the event. 
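+ * (get_user()/put_user() access user memory through ASI_AIUS, so an
+ * unaligned user pointer arrives here with asi == ASI_AIUS and
+ * kernel_mna_trap_fault(0) branches to the caller's exception-table
+ * fixup instead of emulating the access.)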
+ */ + if (asi == ASI_AIUS) { + kernel_mna_trap_fault(0); + return; + } + + log_unaligned(regs); + + if (!ok_for_kernel(insn) || dir == both) { + printk("Unsupported unaligned load/store trap for kernel " + "at <%016lx>.\n", regs->tpc); + unaligned_panic("Kernel does fpu/atomic " + "unaligned load/store.", regs); + + kernel_mna_trap_fault(0); + } else { + unsigned long addr, *reg_addr; + int err; + + addr = compute_effective_address(regs, insn, + ((insn >> 25) & 0x1f)); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + switch (asi) { + case ASI_NL: + case ASI_AIUPL: + case ASI_AIUSL: + case ASI_PL: + case ASI_SL: + case ASI_PNFL: + case ASI_SNFL: + asi &= ~0x08; + break; + } + switch (dir) { + case load: + reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); + err = do_int_load(reg_addr, size, + (unsigned long *) addr, + decode_signedness(insn), asi); + if (likely(!err) && unlikely(asi != orig_asi)) { + unsigned long val_in = *reg_addr; + switch (size) { + case 2: + val_in = swab16(val_in); + break; + case 4: + val_in = swab32(val_in); + break; + case 8: + val_in = swab64(val_in); + break; + case 16: + default: + BUG(); + break; + } + *reg_addr = val_in; + } + break; + + case store: + err = do_int_store(((insn>>25)&0x1f), size, + (unsigned long *) addr, regs, + asi, orig_asi); + break; + + default: + panic("Impossible kernel unaligned trap."); + /* Not reached... */ + } + if (unlikely(err)) + kernel_mna_trap_fault(1); + else + advance(regs); + } +} + +int handle_popc(u32 insn, struct pt_regs *regs) +{ + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + int ret, rd = ((insn >> 25) & 0x1f); + u64 value; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + if (insn & 0x2000) { + maybe_flush_windows(0, 0, rd, from_kernel); + value = sign_extend_imm13(insn); + } else { + maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); + value = fetch_reg(insn & 0x1f, regs); + } + ret = hweight64(value); + if (rd < 16) { + if (rd) + regs->u_regs[rd] = ret; + } else { + unsigned long fp = regs->u_regs[UREG_FP]; + + if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); + put_user(ret, &win32->locals[rd - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + put_user(ret, &win->locals[rd - 16]); + } + } + advance(regs); + return 1; +} + +extern void do_fpother(struct pt_regs *regs); +extern void do_privact(struct pt_regs *regs); +extern void sun4v_data_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); + +int handle_ldf_stq(u32 insn, struct pt_regs *regs) +{ + unsigned long addr = compute_effective_address(regs, insn, 0); + int freg; + struct fpustate *f = FPUSTATE; + int asi = decode_asi(insn, regs); + int flag; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + + save_and_clear_fpu(); + current_thread_info()->xfsr[0] &= ~0x1c000; + if (insn & 0x200000) { + /* STQ */ + u64 first = 0, second = 0; + + freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); + flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; + if (freg & 3) { + current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; + do_fpother(regs); + return 0; + } + if (current_thread_info()->fpsaved[0] & flag) { + first = *(u64 *)&f->regs[freg]; + second = *(u64 *)&f->regs[freg+2]; + } + if (asi < 0x80) { + do_privact(regs); + return 1; + } + switch (asi) { + case ASI_P: + case ASI_S: break; + case ASI_PL: + case ASI_SL: + { + /* Need to convert endians */ + u64 tmp = __swab64p(&first); + + first = __swab64p(&second); + second = tmp; + break; + } + default: + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); + return 1; + } + if (put_user (first >> 32, (u32 __user *)addr) || + __put_user ((u32)first, (u32 __user *)(addr + 4)) || + __put_user (second >> 32, (u32 __user *)(addr + 8)) || + __put_user ((u32)second, (u32 __user *)(addr + 12))) { + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); + return 1; + } + } else { + /* LDF, LDDF, LDQF */ + u32 data[4] __attribute__ ((aligned(8))); + int size, i; + int err; + + if (asi < 0x80) { + do_privact(regs); + return 1; + } else if (asi > ASI_SNFL) { + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); + return 1; + } + switch (insn & 0x180000) { + case 0x000000: size = 1; break; + case 0x100000: size = 4; break; + default: size = 2; break; + } + if (size == 1) + freg = (insn >> 25) & 0x1f; + else + freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); + flag = (freg < 32) ? FPRS_DL : FPRS_DU; + + for (i = 0; i < size; i++) + data[i] = 0; + + err = get_user (data[0], (u32 __user *) addr); + if (!err) { + for (i = 1; i < size; i++) + err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); + } + if (err && !(asi & 0x2 /* NF */)) { + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); + return 1; + } + if (asi & 0x8) /* Little */ { + u64 tmp; + + switch (size) { + case 1: data[0] = le32_to_cpup(data + 0); break; + default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0)); + break; + case 4: tmp = le64_to_cpup((u64 *)(data + 0)); + *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2)); + *(u64 *)(data + 2) = tmp; + break; + } + } + if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { + current_thread_info()->fpsaved[0] = FPRS_FEF; + current_thread_info()->gsr[0] = 0; + } + if (!(current_thread_info()->fpsaved[0] & flag)) { + if (freg < 32) + memset(f->regs, 0, 32*sizeof(u32)); + else + memset(f->regs+32, 0, 32*sizeof(u32)); + } + memcpy(f->regs + freg, data, size * 4); + current_thread_info()->fpsaved[0] |= flag; + } + advance(regs); + return 1; +} + +void handle_ld_nf(u32 insn, struct pt_regs *regs) +{ + int rd = ((insn >> 25) & 0x1f); + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + unsigned long *reg; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + + maybe_flush_windows(0, 0, rd, from_kernel); + reg = fetch_reg_addr(rd, regs); + if (from_kernel || rd < 16) { + reg[0] = 0; + if ((insn & 0x780000) == 0x180000) + reg[1] = 0; + } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { + put_user(0, (int __user *) reg); + if ((insn & 0x780000) == 0x180000) + put_user(0, ((int __user *) reg) + 1); + } else { + put_user(0, (unsigned long __user *) reg); + if ((insn & 0x780000) == 0x180000) + put_user(0, (unsigned long __user 
*) reg + 1); + } + advance(regs); +} + +void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long pc = regs->tpc; + unsigned long tstate = regs->tstate; + u32 insn; + u64 value; + u8 freg; + int flag; + struct fpustate *f = FPUSTATE; + + if (tstate & TSTATE_PRIV) + die_if_kernel("lddfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); + if (test_thread_flag(TIF_32BIT)) + pc = (u32)pc; + if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + int asi = decode_asi(insn, regs); + u32 first, second; + int err; + + if ((asi > ASI_SNFL) || + (asi < ASI_P)) + goto daex; + first = second = 0; + err = get_user(first, (u32 __user *)sfar); + if (!err) + err = get_user(second, (u32 __user *)(sfar + 4)); + if (err) { + if (!(asi & 0x2)) + goto daex; + first = second = 0; + } + save_and_clear_fpu(); + freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); + value = (((u64)first) << 32) | second; + if (asi & 0x8) /* Little */ + value = __swab64p(&value); + flag = (freg < 32) ? FPRS_DL : FPRS_DU; + if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { + current_thread_info()->fpsaved[0] = FPRS_FEF; + current_thread_info()->gsr[0] = 0; + } + if (!(current_thread_info()->fpsaved[0] & flag)) { + if (freg < 32) + memset(f->regs, 0, 32*sizeof(u32)); + else + memset(f->regs+32, 0, 32*sizeof(u32)); + } + *(u64 *)(f->regs + freg) = value; + current_thread_info()->fpsaved[0] |= flag; + } else { +daex: + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, sfar, sfsr); + else + spitfire_data_access_exception(regs, sfsr, sfar); + goto out; + } + advance(regs); +out: + exception_exit(prev_state); +} + +void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long pc = regs->tpc; + unsigned long tstate = regs->tstate; + u32 insn; + u64 value; + u8 freg; + int flag; + struct fpustate *f = FPUSTATE; + + if (tstate & TSTATE_PRIV) + die_if_kernel("stdfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); + if (test_thread_flag(TIF_32BIT)) + pc = (u32)pc; + if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + int asi = decode_asi(insn, regs); + freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); + value = 0; + flag = (freg < 32) ? FPRS_DL : FPRS_DU; + if ((asi > ASI_SNFL) || + (asi < ASI_P)) + goto daex; + save_and_clear_fpu(); + if (current_thread_info()->fpsaved[0] & flag) + value = *(u64 *)&f->regs[freg]; + switch (asi) { + case ASI_P: + case ASI_S: break; + case ASI_PL: + case ASI_SL: + value = __swab64p(&value); break; + default: goto daex; + } + if (put_user (value >> 32, (u32 __user *) sfar) || + __put_user ((u32)value, (u32 __user *)(sfar + 4))) + goto daex; + } else { +daex: + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, sfar, sfsr); + else + spitfire_data_access_exception(regs, sfsr, sfar); + goto out; + } + advance(regs); +out: + exception_exit(prev_state); +} diff --git a/arch/sparc/kernel/uprobes.c b/arch/sparc/kernel/uprobes.c new file mode 100644 index 000000000..1a0600206 --- /dev/null +++ b/arch/sparc/kernel/uprobes.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * User-space Probes (UProbes) for sparc + * + * Copyright (C) 2013 Oracle Inc. + * + * Authors: + * Jose E. 
Marchesi + * Eric Saint Etienne + */ + +#include +#include +#include +#include +#include /* For struct task_struct */ +#include + +#include + +/* Compute the address of the breakpoint instruction and return it. + * + * Note that uprobe_get_swbp_addr is defined as a weak symbol in + * kernel/events/uprobe.c. + */ +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +static void copy_to_page(struct page *page, unsigned long vaddr, + const void *src, int len) +{ + void *kaddr = kmap_atomic(page); + + memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); + kunmap_atomic(kaddr); +} + +/* Fill in the xol area with the probed instruction followed by the + * single-step trap. Some fixups in the copied instruction are + * performed at this point. + * + * Note that uprobe_xol_copy is defined as a weak symbol in + * kernel/events/uprobe.c. + */ +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + const u32 stp_insn = UPROBE_STP_INSN; + u32 insn = *(u32 *) src; + + /* Branches annulling their delay slot must be fixed to not do + * so. Clearing the annul bit on these instructions we can be + * sure the single-step breakpoint in the XOL slot will be + * executed. + */ + + u32 op = (insn >> 30) & 0x3; + u32 op2 = (insn >> 22) & 0x7; + + if (op == 0 && + (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) && + (insn & ANNUL_BIT) == ANNUL_BIT) + insn &= ~ANNUL_BIT; + + copy_to_page(page, vaddr, &insn, len); + copy_to_page(page, vaddr+len, &stp_insn, 4); +} + + +/* Instruction analysis/validity. + * + * This function returns 0 on success or a -ve number on error. + */ +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, + struct mm_struct *mm, unsigned long addr) +{ + /* Any unsupported instruction? Then return -EINVAL */ + return 0; +} + +/* If INSN is a relative control transfer instruction, return the + * corrected branch destination value. + * + * Note that regs->tpc and regs->tnpc still hold the values of the + * program counters at the time of the single-step trap due to the + * execution of the UPROBE_STP_INSN at utask->xol_vaddr + 4. + * + */ +static unsigned long relbranch_fixup(u32 insn, struct uprobe_task *utask, + struct pt_regs *regs) +{ + /* Branch not taken, no mods necessary. */ + if (regs->tnpc == regs->tpc + 0x4UL) + return utask->autask.saved_tnpc + 0x4UL; + + /* The three cases are call, branch w/prediction, + * and traditional branch. + */ + if ((insn & 0xc0000000) == 0x40000000 || + (insn & 0xc1c00000) == 0x00400000 || + (insn & 0xc1c00000) == 0x00800000) { + unsigned long real_pc = (unsigned long) utask->vaddr; + unsigned long ixol_addr = utask->xol_vaddr; + + /* The instruction did all the work for us + * already, just apply the offset to the correct + * instruction location. + */ + return (real_pc + (regs->tnpc - ixol_addr)); + } + + /* It is jmpl or some other absolute PC modification instruction, + * leave NPC as-is. + */ + return regs->tnpc; +} + +/* If INSN is an instruction which writes its PC location + * into a destination register, fix that up. 
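+ *
+ * Example: a probed "call" executed from the XOL slot leaves the
+ * slot's address in %o7; the code below rewrites %o7 (or the jmpl
+ * destination register) with the probed instruction's real address
+ * so the eventual return lands in the right place.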
+ */
+static int retpc_fixup(struct pt_regs *regs, u32 insn,
+		       unsigned long real_pc)
+{
+	unsigned long *slot = NULL;
+	int rc = 0;
+
+	/* Simplest case is 'call', which always uses %o7 */
+	if ((insn & 0xc0000000) == 0x40000000)
+		slot = &regs->u_regs[UREG_I7];
+
+	/* 'jmpl' encodes the register inside of the opcode */
+	if ((insn & 0xc1f80000) == 0x81c00000) {
+		unsigned long rd = ((insn >> 25) & 0x1f);
+
+		if (rd <= 15) {
+			slot = &regs->u_regs[rd];
+		} else {
+			unsigned long fp = regs->u_regs[UREG_FP];
+			/* Hard case, it goes onto the stack. */
+			flushw_all();
+
+			rd -= 16;
+			if (test_thread_64bit_stack(fp)) {
+				unsigned long __user *uslot =
+			(unsigned long __user *) (fp + STACK_BIAS) + rd;
+				rc = __put_user(real_pc, uslot);
+			} else {
+				unsigned int __user *uslot = (unsigned int
+					__user *) fp + rd;
+				rc = __put_user((u32) real_pc, uslot);
+			}
+		}
+	}
+	if (slot != NULL)
+		*slot = real_pc;
+	return rc;
+}
+
+/* Single-stepping can be avoided for certain instructions: NOPs and
+ * instructions that can be emulated. This function determines
+ * whether the instruction where the uprobe is installed falls in one
+ * of these cases and emulates it.
+ *
+ * This function returns true if the single-stepping can be skipped,
+ * false otherwise.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	/* We currently only emulate NOP instructions.
+	 */
+
+	if (auprobe->ixol == (1 << 24)) {
+		regs->tnpc += 4;
+		regs->tpc += 4;
+		return true;
+	}
+
+	return false;
+}
+
+/* Prepare to execute out of line. At this point
+ * current->utask->xol_vaddr points to an allocated XOL slot properly
+ * initialized with the original instruction and the single-stepping
+ * trap instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	struct arch_uprobe_task *autask = &current->utask->autask;
+
+	/* Save the current program counters so they can be restored
+	 * later.
+	 */
+	autask->saved_tpc = regs->tpc;
+	autask->saved_tnpc = regs->tnpc;
+
+	/* Adjust PC and NPC so the first instruction in the XOL slot
+	 * will be executed by the user task.
+	 */
+	instruction_pointer_set(regs, utask->xol_vaddr);
+
+	return 0;
+}
+
+/* Prepare to resume execution after the single-step. Called after
+ * single-stepping. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	struct arch_uprobe_task *autask = &utask->autask;
+	u32 insn = auprobe->ixol;
+	int rc = 0;
+
+	if (utask->state == UTASK_SSTEP_ACK) {
+		regs->tnpc = relbranch_fixup(insn, utask, regs);
+		regs->tpc = autask->saved_tnpc;
+		rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
+	} else {
+		regs->tnpc = utask->vaddr+4;
+		regs->tpc = autask->saved_tnpc+4;
+	}
+	return rc;
+}
+
+/* Handler for uprobe traps. This is called from the traps table and
+ * triggers the proper die notification.
+ */
+asmlinkage void uprobe_trap(struct pt_regs *regs,
+			    unsigned long trap_level)
+{
+	BUG_ON(trap_level != 0x173 && trap_level != 0x174);
+
+	/* We are only interested in user-mode code. Uprobe traps
+	 * shall not be present in kernel code.
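+	 * A trap taken from kernel mode would mean a stray ta 0x73/0x74
+	 * in kernel text, so it is handed to bad_trap() below.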
+ */ + if (!user_mode(regs)) { + local_irq_enable(); + bad_trap(regs, trap_level); + return; + } + + /* trap_level == 0x173 --> ta 0x73 + * trap_level == 0x174 --> ta 0x74 + */ + if (notify_die((trap_level == 0x173) ? DIE_BPT : DIE_SSTEP, + (trap_level == 0x173) ? "bpt" : "sstep", + regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) + bad_trap(regs, trap_level); +} + +/* Callback routine for handling die notifications. +*/ +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + int ret = NOTIFY_DONE; + struct die_args *args = (struct die_args *)data; + + /* We are only interested in userspace traps */ + if (args->regs && !user_mode(args->regs)) + return NOTIFY_DONE; + + switch (val) { + case DIE_BPT: + if (uprobe_pre_sstep_notifier(args->regs)) + ret = NOTIFY_STOP; + break; + + case DIE_SSTEP: + if (uprobe_post_sstep_notifier(args->regs)) + ret = NOTIFY_STOP; + + default: + break; + } + + return ret; +} + +/* This function gets called when a XOL instruction either gets + * trapped or the thread has a fatal signal, so reset the instruction + * pointer to its probed address. + */ +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + instruction_pointer_set(regs, utask->vaddr); +} + +/* If xol insn itself traps and generates a signal(Say, + * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped + * instruction jumps back to its own address. + */ +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + return false; +} + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long orig_ret_vaddr = regs->u_regs[UREG_I7]; + + regs->u_regs[UREG_I7] = trampoline_vaddr-8; + + return orig_ret_vaddr + 8; +} diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000..e4cee7be5 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text + .align 8 + .globl user_rtt_fill_fixup_common +user_rtt_fill_fixup_common: + rdpr %cwp, %g1 + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + + rdpr %wstate, %g2 + sll %g2, 3, %g2 + wrpr %g2, 0x0, %wstate + + /* We know %canrestore and %otherwin are both zero. 
*/ + + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + sethi %hi(KERNBASE), %g1 + flush %g1 + + mov %g4, %l4 + mov %g5, %l5 + brnz,pn %g3, 1f + mov %g3, %l3 + + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] +1: + mov %g6, %l1 + wrpr %g0, 0x0, %tl + +661: nop + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + +661: wrpr %g0, RTRAP_PSTATE, %pstate + .section .sun_m7_1insn_patch, "ax" + .word 661b + /* Re-enable PSTATE.mcde to maintain ADI security */ + wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate + .previous + + mov %l1, %g6 + ldx [%g6 + TI_TASK], %g4 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) + + brnz,pn %l3, 1f + nop + + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +1: cmp %g3, 2 + bne,pn %xcc, 2f + nop + + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 + call sun4v_do_mna + mov %l5, %o1 + ba,a,pt %xcc, rtrap +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + nop + ba,a,pt %xcc, rtrap + +2: sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap + nop + +1: call spitfire_data_access_exception + nop + ba,a,pt %xcc, rtrap diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S new file mode 100644 index 000000000..7a2d9a9be --- /dev/null +++ b/arch/sparc/kernel/utrap.S @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + .globl utrap_trap + .type utrap_trap,#function +utrap_trap: /* %g3=handler,%g4=level */ + TRAP_LOAD_THREAD_REG(%g6, %g1) + ldx [%g6 + TI_UTRAPS], %g1 + brnz,pt %g1, invoke_utrap + nop + + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + call bad_trap + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + +invoke_utrap: + sllx %g3, 3, %g3 + ldx [%g1 + %g3], %g1 + save %sp, -128, %sp + rdpr %tstate, %l6 + rdpr %cwp, %l7 + andn %l6, TSTATE_CWP, %l6 + wrpr %l6, %l7, %tstate + rdpr %tpc, %l6 + rdpr %tnpc, %l7 + wrpr %g1, 0, %tnpc + done + .size utrap_trap,.-utrap_trap diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c new file mode 100644 index 000000000..0e27437eb --- /dev/null +++ b/arch/sparc/kernel/vdso.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2001 Andrea Arcangeli SuSE + * Copyright 2003 Andi Kleen, SuSE Labs. + * + * Thanks to hpa@transmeta.com for some useful hint. + * Special thanks to Ingo Molnar for his early experience with + * a different vsyscall implementation for Linux/IA32 and for the name. 
+ */ + +#include +#include + +#include + +void update_vsyscall_tz(void) +{ + if (unlikely(vvar_data == NULL)) + return; + + vvar_data->tz_minuteswest = sys_tz.tz_minuteswest; + vvar_data->tz_dsttime = sys_tz.tz_dsttime; +} + +void update_vsyscall(struct timekeeper *tk) +{ + struct vvar_data *vdata = vvar_data; + + if (unlikely(vdata == NULL)) + return; + + vvar_write_begin(vdata); + vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; + vdata->clock.cycle_last = tk->tkr_mono.cycle_last; + vdata->clock.mask = tk->tkr_mono.mask; + vdata->clock.mult = tk->tkr_mono.mult; + vdata->clock.shift = tk->tkr_mono.shift; + + vdata->wall_time_sec = tk->xtime_sec; + vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec; + + vdata->monotonic_time_sec = tk->xtime_sec + + tk->wall_to_monotonic.tv_sec; + vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec + + (tk->wall_to_monotonic.tv_nsec << + tk->tkr_mono.shift); + + while (vdata->monotonic_time_snsec >= + (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { + vdata->monotonic_time_snsec -= + ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; + vdata->monotonic_time_sec++; + } + + vdata->wall_time_coarse_sec = tk->xtime_sec; + vdata->wall_time_coarse_nsec = + (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); + + vdata->monotonic_time_coarse_sec = + vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; + vdata->monotonic_time_coarse_nsec = + vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec; + + while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) { + vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC; + vdata->monotonic_time_coarse_sec++; + } + + vvar_write_end(vdata); +} diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c new file mode 100644 index 000000000..01122a208 --- /dev/null +++ b/arch/sparc/kernel/vio.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* vio.c: Virtual I/O channel devices probing infrastructure. + * + * Copyright (c) 2003-2005 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Santiago Leon santil@us.ibm.com + * Hollis Blanchard + * Stephen Rothwell + * + * Adapted to sparc64 by David S. 
Miller davem@davemloft.net + */ + +#include +#include +#include +#include +#include + +#include +#include + +static const struct vio_device_id *vio_match_device( + const struct vio_device_id *matches, + const struct vio_dev *dev) +{ + const char *type, *compat; + int len; + + type = dev->type; + compat = dev->compat; + len = dev->compat_len; + + while (matches->type[0] || matches->compat[0]) { + int match = 1; + if (matches->type[0]) + match &= !strcmp(matches->type, type); + + if (matches->compat[0]) { + match &= len && + of_find_in_proplist(compat, matches->compat, len); + } + if (match) + return matches; + matches++; + } + return NULL; +} + +static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) +{ + const struct vio_dev *vio_dev = to_vio_dev(dev); + + add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat); + return 0; +} + +static int vio_bus_match(struct device *dev, struct device_driver *drv) +{ + struct vio_dev *vio_dev = to_vio_dev(dev); + struct vio_driver *vio_drv = to_vio_driver(drv); + const struct vio_device_id *matches = vio_drv->id_table; + + if (!matches) + return 0; + + return vio_match_device(matches, vio_dev) != NULL; +} + +static int vio_device_probe(struct device *dev) +{ + struct vio_dev *vdev = to_vio_dev(dev); + struct vio_driver *drv = to_vio_driver(dev->driver); + const struct vio_device_id *id; + + if (!drv->probe) + return -ENODEV; + + id = vio_match_device(drv->id_table, vdev); + if (!id) + return -ENODEV; + + /* alloc irqs (unless the driver specified not to) */ + if (!drv->no_irq) { + if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL) + vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle, + vdev->tx_ino); + + if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL) + vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle, + vdev->rx_ino); + } + + return drv->probe(vdev, id); +} + +static void vio_device_remove(struct device *dev) +{ + struct vio_dev *vdev = to_vio_dev(dev); + struct vio_driver *drv = to_vio_driver(dev->driver); + + if (drv->remove) { + /* + * Ideally, we would remove/deallocate tx/rx virqs + * here - however, there are currently no support + * routines to do so at the moment. 
TBD + */ + + drv->remove(vdev); + } +} + +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vio_dev *vdev = to_vio_dev(dev); + const char *str = "none"; + + if (!strcmp(vdev->type, "vnet-port")) + str = "vnet"; + else if (!strcmp(vdev->type, "vdc-port")) + str = "vdisk"; + + return sprintf(buf, "%s\n", str); +} +static DEVICE_ATTR_RO(devspec); + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vio_dev *vdev = to_vio_dev(dev); + return sprintf(buf, "%s\n", vdev->type); +} +static DEVICE_ATTR_RO(type); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct vio_dev *vdev = to_vio_dev(dev); + + return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *vio_dev_attrs[] = { + &dev_attr_devspec.attr, + &dev_attr_type.attr, + &dev_attr_modalias.attr, + NULL, + }; +ATTRIBUTE_GROUPS(vio_dev); + +static struct bus_type vio_bus_type = { + .name = "vio", + .dev_groups = vio_dev_groups, + .uevent = vio_hotplug, + .match = vio_bus_match, + .probe = vio_device_probe, + .remove = vio_device_remove, +}; + +int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, + const char *mod_name) +{ + viodrv->driver.bus = &vio_bus_type; + viodrv->driver.name = viodrv->name; + viodrv->driver.owner = owner; + viodrv->driver.mod_name = mod_name; + + return driver_register(&viodrv->driver); +} +EXPORT_SYMBOL(__vio_register_driver); + +void vio_unregister_driver(struct vio_driver *viodrv) +{ + driver_unregister(&viodrv->driver); +} +EXPORT_SYMBOL(vio_unregister_driver); + +static void vio_dev_release(struct device *dev) +{ + kfree(to_vio_dev(dev)); +} + +static ssize_t +show_pciobppath_attr(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct vio_dev *vdev; + struct device_node *dp; + + vdev = to_vio_dev(dev); + dp = vdev->dp; + + return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp); +} + +static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, + show_pciobppath_attr, NULL); + +static struct device_node *cdev_node; + +static struct vio_dev *root_vdev; +static u64 cdev_cfg_handle; + +static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node) +{ + const u64 *cfg_handle = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { + u64 target; + + target = mdesc_arc_target(hp, a); + cfg_handle = mdesc_get_property(hp, target, + "cfg-handle", NULL); + if (cfg_handle) + break; + } + + return cfg_handle; +} + +/** + * vio_vdev_node() - Find VDEV node in MD + * @hp: Handle to the MD + * @vdev: Pointer to VDEV + * + * Find the node in the current MD which matches the given vio_dev. This + * must be done dynamically since the node value can change if the MD + * is updated. 
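+ *
+ * An illustrative call sequence, per the locking NOTE below:
+ *
+ *	hp = mdesc_grab();
+ *	node = vio_vdev_node(hp, vdev);
+ *	...use node...
+ *	mdesc_release(hp);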
+ * + * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine + * + * Return: The VDEV node in MDESC + */ +u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev) +{ + u64 node; + + if (vdev == NULL) + return MDESC_NODE_NULL; + + node = mdesc_get_node(hp, (const char *)vdev->node_name, + &vdev->md_node_info); + + return node; +} +EXPORT_SYMBOL(vio_vdev_node); + +static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, + struct vio_dev *vdev) +{ + u64 a; + + vdev->tx_ino = ~0UL; + vdev->rx_ino = ~0UL; + vdev->channel_id = ~0UL; + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + const u64 *chan_id; + const u64 *irq; + u64 target; + + target = mdesc_arc_target(hp, a); + + irq = mdesc_get_property(hp, target, "tx-ino", NULL); + if (irq) + vdev->tx_ino = *irq; + + irq = mdesc_get_property(hp, target, "rx-ino", NULL); + if (irq) + vdev->rx_ino = *irq; + + chan_id = mdesc_get_property(hp, target, "id", NULL); + if (chan_id) + vdev->channel_id = *chan_id; + } + + vdev->cdev_handle = cdev_cfg_handle; +} + +int vio_set_intr(unsigned long dev_ino, int state) +{ + int err; + + err = sun4v_vintr_set_valid(cdev_cfg_handle, dev_ino, state); + return err; +} +EXPORT_SYMBOL(vio_set_intr); + +static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, + const char *node_name, + struct device *parent) +{ + const char *type, *compat; + struct device_node *dp; + struct vio_dev *vdev; + int err, tlen, clen; + const u64 *id, *cfg_handle; + + type = mdesc_get_property(hp, mp, "device-type", &tlen); + if (!type) { + type = mdesc_get_property(hp, mp, "name", &tlen); + if (!type) { + type = mdesc_node_name(hp, mp); + tlen = strlen(type) + 1; + } + } + if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) { + printk(KERN_ERR "VIO: Type string [%s] is too long.\n", + type); + return NULL; + } + + id = mdesc_get_property(hp, mp, "id", NULL); + + cfg_handle = vio_cfg_handle(hp, mp); + + compat = mdesc_get_property(hp, mp, "device-type", &clen); + if (!compat) { + clen = 0; + } else if (clen > VIO_MAX_COMPAT_LEN) { + printk(KERN_ERR "VIO: Compat len %d for [%s] is too long.\n", + clen, type); + return NULL; + } + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + printk(KERN_ERR "VIO: Could not allocate vio_dev\n"); + return NULL; + } + + vdev->mp = mp; + memcpy(vdev->type, type, tlen); + if (compat) + memcpy(vdev->compat, compat, clen); + else + memset(vdev->compat, 0, sizeof(vdev->compat)); + vdev->compat_len = clen; + + vdev->port_id = ~0UL; + vdev->tx_irq = 0; + vdev->rx_irq = 0; + + vio_fill_channel_info(hp, mp, vdev); + + if (!id) { + dev_set_name(&vdev->dev, "%s", type); + vdev->dev_no = ~(u64)0; + } else if (!cfg_handle) { + dev_set_name(&vdev->dev, "%s-%llu", type, *id); + vdev->dev_no = *id; + } else { + dev_set_name(&vdev->dev, "%s-%llu-%llu", type, + *cfg_handle, *id); + vdev->dev_no = *cfg_handle; + vdev->port_id = *id; + } + + vdev->dev.parent = parent; + vdev->dev.bus = &vio_bus_type; + vdev->dev.release = vio_dev_release; + + if (parent == NULL) { + dp = cdev_node; + } else if (to_vio_dev(parent) == root_vdev) { + for_each_child_of_node(cdev_node, dp) { + if (of_node_is_type(dp, type)) + break; + } + } else { + dp = to_vio_dev(parent)->dp; + } + vdev->dp = dp; + + /* + * node_name is NULL for the parent/channel-devices node and + * the parent doesn't require the MD node info. 
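+	 * (vio_init() creates that parent via
+	 * vio_create_one(hp, root, NULL, NULL).)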
+	 */
+	if (node_name != NULL) {
+		(void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
+				node_name);
+
+		err = mdesc_get_node_info(hp, mp, node_name,
+					  &vdev->md_node_info);
+		if (err) {
+			pr_err("VIO: Could not get MD node info %s, err=%d\n",
+			       dev_name(&vdev->dev), err);
+			kfree(vdev);
+			return NULL;
+		}
+	}
+
+	pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
+		dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);
+
+	err = device_register(&vdev->dev);
+	if (err) {
+		printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
+		       dev_name(&vdev->dev), err);
+		put_device(&vdev->dev);
+		return NULL;
+	}
+	if (vdev->dp)
+		err = sysfs_create_file(&vdev->dev.kobj,
+					&dev_attr_obppath.attr);
+
+	return vdev;
+}
+
+static void vio_add(struct mdesc_handle *hp, u64 node,
+		    const char *node_name)
+{
+	(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
+}
+
+struct vio_remove_node_data {
+	struct mdesc_handle *hp;
+	u64 node;
+};
+
+static int vio_md_node_match(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct vio_remove_node_data *node_data;
+	u64 node;
+
+	node_data = (struct vio_remove_node_data *)arg;
+
+	node = vio_vdev_node(node_data->hp, vdev);
+
+	if (node == node_data->node)
+		return 1;
+	else
+		return 0;
+}
+
+static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
+{
+	struct vio_remove_node_data node_data;
+	struct device *dev;
+
+	node_data.hp = hp;
+	node_data.node = node;
+
+	dev = device_find_child(&root_vdev->dev, (void *)&node_data,
+				vio_md_node_match);
+	if (dev) {
+		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
+
+		device_unregister(dev);
+		put_device(dev);
+	} else {
+		pr_err("VIO: %s node not found in MDESC\n", node_name);
+	}
+}
+
+static struct mdesc_notifier_client vio_device_notifier = {
+	.add = vio_add,
+	.remove = vio_remove,
+	.node_name = "virtual-device-port",
+};
+
+/* We are only interested in domain service ports under the
+ * "domain-services" node. On control nodes there is another port
+ * under "openboot" that we should not mess with as apparently that is
+ * reserved exclusively for OBP use.
+ */ +static void vio_add_ds(struct mdesc_handle *hp, u64 node, + const char *node_name) +{ + int found; + u64 a; + + found = 0; + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name = mdesc_node_name(hp, target); + + if (!strcmp(name, "domain-services")) { + found = 1; + break; + } + } + + if (found) + (void) vio_create_one(hp, node, node_name, &root_vdev->dev); +} + +static struct mdesc_notifier_client vio_ds_notifier = { + .add = vio_add_ds, + .remove = vio_remove, + .node_name = "domain-services-port", +}; + +static const char *channel_devices_node = "channel-devices"; +static const char *channel_devices_compat = "SUNW,sun4v-channel-devices"; +static const char *cfg_handle_prop = "cfg-handle"; + +static int __init vio_init(void) +{ + struct mdesc_handle *hp; + const char *compat; + const u64 *cfg_handle; + int err, len; + u64 root; + + err = bus_register(&vio_bus_type); + if (err) { + printk(KERN_ERR "VIO: Could not register bus type err=%d\n", + err); + return err; + } + + hp = mdesc_grab(); + if (!hp) + return 0; + + root = mdesc_node_by_name(hp, MDESC_NODE_NULL, channel_devices_node); + if (root == MDESC_NODE_NULL) { + printk(KERN_INFO "VIO: No channel-devices MDESC node.\n"); + mdesc_release(hp); + return 0; + } + + cdev_node = of_find_node_by_name(NULL, "channel-devices"); + err = -ENODEV; + if (!cdev_node) { + printk(KERN_INFO "VIO: No channel-devices OBP node.\n"); + goto out_release; + } + + compat = mdesc_get_property(hp, root, "compatible", &len); + if (!compat) { + printk(KERN_ERR "VIO: Channel devices lacks compatible " + "property\n"); + goto out_release; + } + if (!of_find_in_proplist(compat, channel_devices_compat, len)) { + printk(KERN_ERR "VIO: Channel devices node lacks (%s) " + "compat entry.\n", channel_devices_compat); + goto out_release; + } + + cfg_handle = mdesc_get_property(hp, root, cfg_handle_prop, NULL); + if (!cfg_handle) { + printk(KERN_ERR "VIO: Channel devices lacks %s property\n", + cfg_handle_prop); + goto out_release; + } + + cdev_cfg_handle = *cfg_handle; + + root_vdev = vio_create_one(hp, root, NULL, NULL); + err = -ENODEV; + if (!root_vdev) { + printk(KERN_ERR "VIO: Could not create root device.\n"); + goto out_release; + } + + mdesc_register_notifier(&vio_device_notifier); + mdesc_register_notifier(&vio_ds_notifier); + + mdesc_release(hp); + + return err; + +out_release: + mdesc_release(hp); + return err; +} + +postcore_initcall(vio_init); diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c new file mode 100644 index 000000000..e27afd233 --- /dev/null +++ b/arch/sparc/kernel/viohs.c @@ -0,0 +1,860 @@ +// SPDX-License-Identifier: GPL-2.0 +/* viohs.c: LDOM Virtual I/O handshake helper layer. + * + * Copyright (C) 2007 David S. 
Miller + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +int vio_ldc_send(struct vio_driver_state *vio, void *data, int len) +{ + int err, limit = 1000; + + err = -EINVAL; + while (limit-- > 0) { + err = ldc_write(vio->lp, data, len); + if (!err || (err != -EAGAIN)) + break; + udelay(1); + } + + return err; +} +EXPORT_SYMBOL(vio_ldc_send); + +static int send_ctrl(struct vio_driver_state *vio, + struct vio_msg_tag *tag, int len) +{ + tag->sid = vio_send_sid(vio); + return vio_ldc_send(vio, tag, len); +} + +static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env) +{ + tag->type = type; + tag->stype = stype; + tag->stype_env = stype_env; +} + +static int send_version(struct vio_driver_state *vio, u16 major, u16 minor) +{ + struct vio_ver_info pkt; + + vio->_local_sid = (u32) sched_clock(); + + memset(&pkt, 0, sizeof(pkt)); + init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO); + pkt.major = major; + pkt.minor = minor; + pkt.dev_class = vio->dev_class; + + viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n", + major, minor, vio->dev_class); + + return send_ctrl(vio, &pkt.tag, sizeof(pkt)); +} + +static int start_handshake(struct vio_driver_state *vio) +{ + int err; + + viodbg(HS, "START HANDSHAKE\n"); + + vio->hs_state = VIO_HS_INVALID; + + err = send_version(vio, + vio->ver_table[0].major, + vio->ver_table[0].minor); + if (err < 0) + return err; + + return 0; +} + +static void flush_rx_dring(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + u64 ident; + + BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG)); + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + ident = dr->ident; + + BUG_ON(!vio->desc_buf); + kfree(vio->desc_buf); + vio->desc_buf = NULL; + + memset(dr, 0, sizeof(*dr)); + dr->ident = ident; +} + +void vio_link_state_change(struct vio_driver_state *vio, int event) +{ + if (event == LDC_EVENT_UP) { + vio->hs_state = VIO_HS_INVALID; + + switch (vio->dev_class) { + case VDEV_NETWORK: + case VDEV_NETWORK_SWITCH: + vio->dr_state = (VIO_DR_STATE_TXREQ | + VIO_DR_STATE_RXREQ); + break; + + case VDEV_DISK: + vio->dr_state = VIO_DR_STATE_TXREQ; + break; + case VDEV_DISK_SERVER: + vio->dr_state = VIO_DR_STATE_RXREQ; + break; + } + start_handshake(vio); + } else if (event == LDC_EVENT_RESET) { + vio->hs_state = VIO_HS_INVALID; + + if (vio->dr_state & VIO_DR_STATE_RXREG) + flush_rx_dring(vio); + + vio->dr_state = 0x00; + memset(&vio->ver, 0, sizeof(vio->ver)); + + ldc_disconnect(vio->lp); + } +} +EXPORT_SYMBOL(vio_link_state_change); + +static int handshake_failure(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + + /* XXX Put policy here... Perhaps start a timer to fire + * XXX in 100 ms, which will bring the link up and retry + * XXX the handshake. 
+ */ + + viodbg(HS, "HANDSHAKE FAILURE\n"); + + vio->dr_state &= ~(VIO_DR_STATE_TXREG | + VIO_DR_STATE_RXREG); + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + memset(dr, 0, sizeof(*dr)); + + kfree(vio->desc_buf); + vio->desc_buf = NULL; + vio->desc_buf_len = 0; + + vio->hs_state = VIO_HS_INVALID; + + return -ECONNRESET; +} + +static int process_unknown(struct vio_driver_state *vio, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + + printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n", + vio->vdev->channel_id); + + ldc_disconnect(vio->lp); + + return -ECONNRESET; +} + +static int send_dreg(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING]; + union { + struct vio_dring_register pkt; + char all[sizeof(struct vio_dring_register) + + (sizeof(struct ldc_trans_cookie) * + VIO_MAX_RING_COOKIES)]; + } u; + size_t bytes = sizeof(struct vio_dring_register) + + (sizeof(struct ldc_trans_cookie) * + dr->ncookies); + int i; + + if (WARN_ON(bytes > sizeof(u))) + return -EINVAL; + + memset(&u, 0, bytes); + init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG); + u.pkt.dring_ident = 0; + u.pkt.num_descr = dr->num_entries; + u.pkt.descr_size = dr->entry_size; + u.pkt.options = VIO_TX_DRING; + u.pkt.num_cookies = dr->ncookies; + + viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] " + "ncookies[%u]\n", + u.pkt.num_descr, u.pkt.descr_size, u.pkt.options, + u.pkt.num_cookies); + + for (i = 0; i < dr->ncookies; i++) { + u.pkt.cookies[i] = dr->cookies[i]; + + viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n", + i, + (unsigned long long) u.pkt.cookies[i].cookie_addr, + (unsigned long long) u.pkt.cookies[i].cookie_size); + } + + return send_ctrl(vio, &u.pkt.tag, bytes); +} + +static int send_rdx(struct vio_driver_state *vio) +{ + struct vio_rdx pkt; + + memset(&pkt, 0, sizeof(pkt)); + + init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX); + + viodbg(HS, "SEND RDX INFO\n"); + + return send_ctrl(vio, &pkt.tag, sizeof(pkt)); +} + +static int send_attr(struct vio_driver_state *vio) +{ + if (!vio->ops) + return -EINVAL; + + return vio->ops->send_attr(vio); +} + +static struct vio_version *find_by_major(struct vio_driver_state *vio, + u16 major) +{ + struct vio_version *ret = NULL; + int i; + + for (i = 0; i < vio->ver_table_entries; i++) { + struct vio_version *v = &vio->ver_table[i]; + if (v->major <= major) { + ret = v; + break; + } + } + return ret; +} + +static int process_ver_info(struct vio_driver_state *vio, + struct vio_ver_info *pkt) +{ + struct vio_version *vap; + int err; + + viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n", + pkt->major, pkt->minor, pkt->dev_class); + + if (vio->hs_state != VIO_HS_INVALID) { + /* XXX Perhaps invoke start_handshake? 
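+ * XXX (Aside, not part of the original file: find_by_major() below
+ * XXX  assumes ver_table[] is sorted by descending major and returns the
+ * XXX  first entry whose major is <= the peer's.  Worked example with a
+ * XXX  hypothetical table {2.0, 1.6}: a peer offering 3.0 gets a NACK
+ * XXX  suggesting 2.0 and can retry lower; a peer offering 1.3 matches
+ * XXX  majors and is ACKed at 1.3, the minimum of the two minors.)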
XXX */ + memset(&vio->ver, 0, sizeof(vio->ver)); + vio->hs_state = VIO_HS_INVALID; + } + + vap = find_by_major(vio, pkt->major); + + vio->_peer_sid = pkt->tag.sid; + + if (!vap) { + pkt->tag.stype = VIO_SUBTYPE_NACK; + pkt->major = 0; + pkt->minor = 0; + viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n"); + err = send_ctrl(vio, &pkt->tag, sizeof(*pkt)); + } else if (vap->major != pkt->major) { + pkt->tag.stype = VIO_SUBTYPE_NACK; + pkt->major = vap->major; + pkt->minor = vap->minor; + viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n", + pkt->major, pkt->minor); + err = send_ctrl(vio, &pkt->tag, sizeof(*pkt)); + } else { + struct vio_version ver = { + .major = pkt->major, + .minor = pkt->minor, + }; + if (ver.minor > vap->minor) + ver.minor = vap->minor; + pkt->minor = ver.minor; + pkt->tag.stype = VIO_SUBTYPE_ACK; + pkt->dev_class = vio->dev_class; + viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n", + pkt->major, pkt->minor); + err = send_ctrl(vio, &pkt->tag, sizeof(*pkt)); + if (err > 0) { + vio->ver = ver; + vio->hs_state = VIO_HS_GOTVERS; + } + } + if (err < 0) + return handshake_failure(vio); + + return 0; +} + +static int process_ver_ack(struct vio_driver_state *vio, + struct vio_ver_info *pkt) +{ + viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n", + pkt->major, pkt->minor, pkt->dev_class); + + if (vio->hs_state & VIO_HS_GOTVERS) { + if (vio->ver.major != pkt->major || + vio->ver.minor != pkt->minor) { + pkt->tag.stype = VIO_SUBTYPE_NACK; + (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt)); + return handshake_failure(vio); + } + } else { + vio->ver.major = pkt->major; + vio->ver.minor = pkt->minor; + vio->hs_state = VIO_HS_GOTVERS; + } + + switch (vio->dev_class) { + case VDEV_NETWORK: + case VDEV_DISK: + if (send_attr(vio) < 0) + return handshake_failure(vio); + break; + + default: + break; + } + + return 0; +} + +static int process_ver_nack(struct vio_driver_state *vio, + struct vio_ver_info *pkt) +{ + struct vio_version *nver; + + viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n", + pkt->major, pkt->minor, pkt->dev_class); + + if (pkt->major == 0 && pkt->minor == 0) + return handshake_failure(vio); + nver = find_by_major(vio, pkt->major); + if (!nver) + return handshake_failure(vio); + + if (send_version(vio, nver->major, nver->minor) < 0) + return handshake_failure(vio); + + return 0; +} + +static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt) +{ + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return process_ver_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return process_ver_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return process_ver_nack(vio, pkt); + + default: + return handshake_failure(vio); + } +} + +static int process_attr(struct vio_driver_state *vio, void *pkt) +{ + int err; + + if (!(vio->hs_state & VIO_HS_GOTVERS)) + return handshake_failure(vio); + + if (!vio->ops) + return 0; + + err = vio->ops->handle_attr(vio, pkt); + if (err < 0) { + return handshake_failure(vio); + } else { + vio->hs_state |= VIO_HS_GOT_ATTR; + + if ((vio->dr_state & VIO_DR_STATE_TXREQ) && + !(vio->hs_state & VIO_HS_SENT_DREG)) { + if (send_dreg(vio) < 0) + return handshake_failure(vio); + + vio->hs_state |= VIO_HS_SENT_DREG; + } + } + + return 0; +} + +static int all_drings_registered(struct vio_driver_state *vio) +{ + int need_rx, need_tx; + + need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ); + need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ); + + if (need_rx && + !(vio->dr_state & VIO_DR_STATE_RXREG)) + return 0; + + if (need_tx && + !(vio->dr_state & 
VIO_DR_STATE_TXREG)) + return 0; + + return 1; +} + +static int process_dreg_info(struct vio_driver_state *vio, + struct vio_dring_register *pkt) +{ + struct vio_dring_state *dr; + int i; + + viodbg(HS, "GOT DRING_REG INFO ident[%llx] " + "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", + (unsigned long long) pkt->dring_ident, + pkt->num_descr, pkt->descr_size, pkt->options, + pkt->num_cookies); + + if (!(vio->dr_state & VIO_DR_STATE_RXREQ)) + goto send_nack; + + if (vio->dr_state & VIO_DR_STATE_RXREG) + goto send_nack; + + /* v1.6 and higher, ACK with desired, supported mode, or NACK */ + if (vio_version_after_eq(vio, 1, 6)) { + if (!(pkt->options & VIO_TX_DRING)) + goto send_nack; + pkt->options = VIO_TX_DRING; + } + + BUG_ON(vio->desc_buf); + + vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC); + if (!vio->desc_buf) + goto send_nack; + + vio->desc_buf_len = pkt->descr_size; + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + + dr->num_entries = pkt->num_descr; + dr->entry_size = pkt->descr_size; + dr->ncookies = pkt->num_cookies; + for (i = 0; i < dr->ncookies; i++) { + dr->cookies[i] = pkt->cookies[i]; + + viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n", + i, + (unsigned long long) + pkt->cookies[i].cookie_addr, + (unsigned long long) + pkt->cookies[i].cookie_size); + } + + pkt->tag.stype = VIO_SUBTYPE_ACK; + pkt->dring_ident = ++dr->ident; + + viodbg(HS, "SEND DRING_REG ACK ident[%llx] " + "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", + (unsigned long long) pkt->dring_ident, + pkt->num_descr, pkt->descr_size, pkt->options, + pkt->num_cookies); + + if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0) + goto send_nack; + + vio->dr_state |= VIO_DR_STATE_RXREG; + + return 0; + +send_nack: + pkt->tag.stype = VIO_SUBTYPE_NACK; + viodbg(HS, "SEND DRING_REG NACK\n"); + (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt)); + + return handshake_failure(vio); +} + +static int process_dreg_ack(struct vio_driver_state *vio, + struct vio_dring_register *pkt) +{ + struct vio_dring_state *dr; + + viodbg(HS, "GOT DRING_REG ACK ident[%llx] " + "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", + (unsigned long long) pkt->dring_ident, + pkt->num_descr, pkt->descr_size, pkt->options, + pkt->num_cookies); + + dr = &vio->drings[VIO_DRIVER_TX_RING]; + + if (!(vio->dr_state & VIO_DR_STATE_TXREQ)) + return handshake_failure(vio); + + dr->ident = pkt->dring_ident; + vio->dr_state |= VIO_DR_STATE_TXREG; + + if (all_drings_registered(vio)) { + if (send_rdx(vio) < 0) + return handshake_failure(vio); + vio->hs_state = VIO_HS_SENT_RDX; + } + return 0; +} + +static int process_dreg_nack(struct vio_driver_state *vio, + struct vio_dring_register *pkt) +{ + viodbg(HS, "GOT DRING_REG NACK ident[%llx] " + "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", + (unsigned long long) pkt->dring_ident, + pkt->num_descr, pkt->descr_size, pkt->options, + pkt->num_cookies); + + return handshake_failure(vio); +} + +static int process_dreg(struct vio_driver_state *vio, + struct vio_dring_register *pkt) +{ + if (!(vio->hs_state & VIO_HS_GOTVERS)) + return handshake_failure(vio); + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return process_dreg_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return process_dreg_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return process_dreg_nack(vio, pkt); + + default: + return handshake_failure(vio); + } +} + +static int process_dunreg(struct vio_driver_state *vio, + struct vio_dring_unregister *pkt) +{ + struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING]; + + viodbg(HS, "GOT 
DRING_UNREG\n"); + + if (pkt->dring_ident != dr->ident) + return 0; + + vio->dr_state &= ~VIO_DR_STATE_RXREG; + + memset(dr, 0, sizeof(*dr)); + + kfree(vio->desc_buf); + vio->desc_buf = NULL; + vio->desc_buf_len = 0; + + return 0; +} + +static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt) +{ + viodbg(HS, "GOT RDX INFO\n"); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + viodbg(HS, "SEND RDX ACK\n"); + if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0) + return handshake_failure(vio); + + vio->hs_state |= VIO_HS_SENT_RDX_ACK; + return 0; +} + +static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt) +{ + viodbg(HS, "GOT RDX ACK\n"); + + if (!(vio->hs_state & VIO_HS_SENT_RDX)) + return handshake_failure(vio); + + vio->hs_state |= VIO_HS_GOT_RDX_ACK; + return 0; +} + +static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt) +{ + viodbg(HS, "GOT RDX NACK\n"); + + return handshake_failure(vio); +} + +static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt) +{ + if (!all_drings_registered(vio)) + handshake_failure(vio); + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return process_rdx_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return process_rdx_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return process_rdx_nack(vio, pkt); + + default: + return handshake_failure(vio); + } +} + +int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt) +{ + struct vio_msg_tag *tag = pkt; + u8 prev_state = vio->hs_state; + int err; + + switch (tag->stype_env) { + case VIO_VER_INFO: + err = process_ver(vio, pkt); + break; + + case VIO_ATTR_INFO: + err = process_attr(vio, pkt); + break; + + case VIO_DRING_REG: + err = process_dreg(vio, pkt); + break; + + case VIO_DRING_UNREG: + err = process_dunreg(vio, pkt); + break; + + case VIO_RDX: + err = process_rdx(vio, pkt); + break; + + default: + err = process_unknown(vio, pkt); + break; + } + + if (!err && + vio->hs_state != prev_state && + (vio->hs_state & VIO_HS_COMPLETE)) { + if (vio->ops) + vio->ops->handshake_complete(vio); + } + + return err; +} +EXPORT_SYMBOL(vio_control_pkt_engine); + +void vio_conn_reset(struct vio_driver_state *vio) +{ +} +EXPORT_SYMBOL(vio_conn_reset); + +/* The issue is that the Solaris virtual disk server just mirrors the + * SID values it gets from the client peer. So we work around that + * here in vio_{validate,send}_sid() so that the drivers don't need + * to be aware of this crap. + */ +int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp) +{ + u32 sid; + + /* Always let VERSION+INFO packets through unchecked, they + * define the new SID. + */ + if (tp->type == VIO_TYPE_CTRL && + tp->stype == VIO_SUBTYPE_INFO && + tp->stype_env == VIO_VER_INFO) + return 0; + + /* Ok, now figure out which SID to use. 
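+ *
+ * (Illustrative sketch — hypothetical driver code, not part of the
+ *  original file — of where these helpers sit in a driver's LDC receive
+ *  loop:
+ *
+ *	u64 buf[8];	// one 64-byte LDC packet
+ *	int err = ldc_read(vio->lp, buf, sizeof(buf));
+ *	struct vio_msg_tag *tag = (struct vio_msg_tag *)buf;
+ *
+ *	if (err > 0 && !vio_validate_sid(vio, tag) &&
+ *	    tag->type == VIO_TYPE_CTRL)
+ *		err = vio_control_pkt_engine(vio, buf);
+ *  )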
*/ + switch (vio->dev_class) { + case VDEV_NETWORK: + case VDEV_NETWORK_SWITCH: + case VDEV_DISK_SERVER: + default: + sid = vio->_peer_sid; + break; + + case VDEV_DISK: + sid = vio->_local_sid; + break; + } + + if (sid == tp->sid) + return 0; + viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n", + tp->sid, vio->_peer_sid, vio->_local_sid); + return -EINVAL; +} +EXPORT_SYMBOL(vio_validate_sid); + +u32 vio_send_sid(struct vio_driver_state *vio) +{ + switch (vio->dev_class) { + case VDEV_NETWORK: + case VDEV_NETWORK_SWITCH: + case VDEV_DISK: + default: + return vio->_local_sid; + + case VDEV_DISK_SERVER: + return vio->_peer_sid; + } +} +EXPORT_SYMBOL(vio_send_sid); + +int vio_ldc_alloc(struct vio_driver_state *vio, + struct ldc_channel_config *base_cfg, + void *event_arg) +{ + struct ldc_channel_config cfg = *base_cfg; + struct ldc_channel *lp; + + cfg.tx_irq = vio->vdev->tx_irq; + cfg.rx_irq = vio->vdev->rx_irq; + + lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name); + if (IS_ERR(lp)) + return PTR_ERR(lp); + + vio->lp = lp; + + return 0; +} +EXPORT_SYMBOL(vio_ldc_alloc); + +void vio_ldc_free(struct vio_driver_state *vio) +{ + ldc_free(vio->lp); + vio->lp = NULL; + + kfree(vio->desc_buf); + vio->desc_buf = NULL; + vio->desc_buf_len = 0; +} +EXPORT_SYMBOL(vio_ldc_free); + +void vio_port_up(struct vio_driver_state *vio) +{ + unsigned long flags; + int err, state; + + spin_lock_irqsave(&vio->lock, flags); + + state = ldc_state(vio->lp); + + err = 0; + if (state == LDC_STATE_INIT) { + err = ldc_bind(vio->lp); + if (err) + printk(KERN_WARNING "%s: Port %lu bind failed, " + "err=%d\n", + vio->name, vio->vdev->channel_id, err); + } + + if (!err) { + if (ldc_mode(vio->lp) == LDC_MODE_RAW) + ldc_set_state(vio->lp, LDC_STATE_CONNECTED); + else + err = ldc_connect(vio->lp); + + if (err) + printk(KERN_WARNING "%s: Port %lu connect failed, " + "err=%d\n", + vio->name, vio->vdev->channel_id, err); + } + if (err) { + unsigned long expires = jiffies + HZ; + + expires = round_jiffies(expires); + mod_timer(&vio->timer, expires); + } + + spin_unlock_irqrestore(&vio->lock, flags); +} +EXPORT_SYMBOL(vio_port_up); + +static void vio_port_timer(struct timer_list *t) +{ + struct vio_driver_state *vio = from_timer(vio, t, timer); + + vio_port_up(vio); +} + +int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, + u8 dev_class, struct vio_version *ver_table, + int ver_table_size, struct vio_driver_ops *ops, + char *name) +{ + switch (dev_class) { + case VDEV_NETWORK: + case VDEV_NETWORK_SWITCH: + case VDEV_DISK: + case VDEV_DISK_SERVER: + case VDEV_CONSOLE_CON: + break; + + default: + return -EINVAL; + } + + if (dev_class == VDEV_NETWORK || + dev_class == VDEV_NETWORK_SWITCH || + dev_class == VDEV_DISK || + dev_class == VDEV_DISK_SERVER) { + if (!ops || !ops->send_attr || !ops->handle_attr || + !ops->handshake_complete) + return -EINVAL; + } + + if (!ver_table || ver_table_size < 0) + return -EINVAL; + + if (!name) + return -EINVAL; + + spin_lock_init(&vio->lock); + + vio->name = name; + + vio->dev_class = dev_class; + vio->vdev = vdev; + + vio->ver_table = ver_table; + vio->ver_table_entries = ver_table_size; + + vio->ops = ops; + + timer_setup(&vio->timer, vio_port_timer, 0); + + return 0; +} +EXPORT_SYMBOL(vio_driver_init); diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c new file mode 100644 index 000000000..64ed80ed6 --- /dev/null +++ b/arch/sparc/kernel/visemul.c @@ -0,0 +1,899 @@ +// SPDX-License-Identifier: GPL-2.0 +/* visemul.c: 
Emulation of VIS instructions. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* OPF field of various VIS instructions. */ + +/* 000111011 - four 16-bit packs */ +#define FPACK16_OPF 0x03b + +/* 000111010 - two 32-bit packs */ +#define FPACK32_OPF 0x03a + +/* 000111101 - four 16-bit packs */ +#define FPACKFIX_OPF 0x03d + +/* 001001101 - four 16-bit expands */ +#define FEXPAND_OPF 0x04d + +/* 001001011 - two 32-bit merges */ +#define FPMERGE_OPF 0x04b + +/* 000110001 - 8-by-16-bit partitioned product */ +#define FMUL8x16_OPF 0x031 + +/* 000110011 - 8-by-16-bit upper alpha partitioned product */ +#define FMUL8x16AU_OPF 0x033 + +/* 000110101 - 8-by-16-bit lower alpha partitioned product */ +#define FMUL8x16AL_OPF 0x035 + +/* 000110110 - upper 8-by-16-bit partitioned product */ +#define FMUL8SUx16_OPF 0x036 + +/* 000110111 - lower 8-by-16-bit partitioned product */ +#define FMUL8ULx16_OPF 0x037 + +/* 000111000 - upper 8-by-16-bit partitioned product */ +#define FMULD8SUx16_OPF 0x038 + +/* 000111001 - lower unsigned 8-by-16-bit partitioned product */ +#define FMULD8ULx16_OPF 0x039 + +/* 000101000 - four 16-bit compare; set rd if src1 > src2 */ +#define FCMPGT16_OPF 0x028 + +/* 000101100 - two 32-bit compare; set rd if src1 > src2 */ +#define FCMPGT32_OPF 0x02c + +/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ +#define FCMPLE16_OPF 0x020 + +/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ +#define FCMPLE32_OPF 0x024 + +/* 000100010 - four 16-bit compare; set rd if src1 != src2 */ +#define FCMPNE16_OPF 0x022 + +/* 000100110 - two 32-bit compare; set rd if src1 != src2 */ +#define FCMPNE32_OPF 0x026 + +/* 000101010 - four 16-bit compare; set rd if src1 == src2 */ +#define FCMPEQ16_OPF 0x02a + +/* 000101110 - two 32-bit compare; set rd if src1 == src2 */ +#define FCMPEQ32_OPF 0x02e + +/* 000000000 - Eight 8-bit edge boundary processing */ +#define EDGE8_OPF 0x000 + +/* 000000001 - Eight 8-bit edge boundary processing, no CC */ +#define EDGE8N_OPF 0x001 + +/* 000000010 - Eight 8-bit edge boundary processing, little-endian */ +#define EDGE8L_OPF 0x002 + +/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ +#define EDGE8LN_OPF 0x003 + +/* 000000100 - Four 16-bit edge boundary processing */ +#define EDGE16_OPF 0x004 + +/* 000000101 - Four 16-bit edge boundary processing, no CC */ +#define EDGE16N_OPF 0x005 + +/* 000000110 - Four 16-bit edge boundary processing, little-endian */ +#define EDGE16L_OPF 0x006 + +/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ +#define EDGE16LN_OPF 0x007 + +/* 000001000 - Two 32-bit edge boundary processing */ +#define EDGE32_OPF 0x008 + +/* 000001001 - Two 32-bit edge boundary processing, no CC */ +#define EDGE32N_OPF 0x009 + +/* 000001010 - Two 32-bit edge boundary processing, little-endian */ +#define EDGE32L_OPF 0x00a + +/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ +#define EDGE32LN_OPF 0x00b + +/* 000111110 - distance between 8 8-bit components */ +#define PDIST_OPF 0x03e + +/* 000010000 - convert 8-bit 3-D address to blocked byte address */ +#define ARRAY8_OPF 0x010 + +/* 000010010 - convert 16-bit 3-D address to blocked byte address */ +#define ARRAY16_OPF 0x012 + +/* 000010100 - convert 32-bit 3-D address to blocked byte address */ +#define ARRAY32_OPF 0x014 + +/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ +#define 
BMASK_OPF 0x019 + +/* 001001100 - Permute bytes as specified by GSR.MASK */ +#define BSHUFFLE_OPF 0x04c + +#define VIS_OPF_SHIFT 5 +#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) + +#define RS1(INSN) (((INSN) >> 14) & 0x1f) +#define RS2(INSN) (((INSN) >> 0) & 0x1f) +#define RD(INSN) (((INSN) >> 25) & 0x1f) + +static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, + unsigned int rd, int from_kernel) +{ + if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { + if (from_kernel != 0) + __asm__ __volatile__("flushw"); + else + flushw_user(); + } +} + +static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) +{ + unsigned long value, fp; + + if (reg < 16) + return (!reg ? 0 : regs->u_regs[reg]); + + fp = regs->u_regs[UREG_FP]; + + if (regs->tstate & TSTATE_PRIV) { + struct reg_window *win; + win = (struct reg_window *)(fp + STACK_BIAS); + value = win->locals[reg - 16]; + } else if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); + get_user(value, &win32->locals[reg - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + get_user(value, &win->locals[reg - 16]); + } + return value; +} + +static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, + struct pt_regs *regs) +{ + unsigned long fp = regs->u_regs[UREG_FP]; + + BUG_ON(reg < 16); + BUG_ON(regs->tstate & TSTATE_PRIV); + + if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); + return (unsigned long __user *)&win32->locals[reg - 16]; + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + return &win->locals[reg - 16]; + } +} + +static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, + struct pt_regs *regs) +{ + BUG_ON(reg >= 16); + BUG_ON(regs->tstate & TSTATE_PRIV); + + return ®s->u_regs[reg]; +} + +static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) +{ + if (rd < 16) { + unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); + + *rd_kern = val; + } else { + unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); + + if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) + __put_user((u32)val, (u32 __user *)rd_user); + else + __put_user(val, rd_user); + } +} + +static inline unsigned long fpd_regval(struct fpustate *f, + unsigned int insn_regnum) +{ + insn_regnum = (((insn_regnum & 1) << 5) | + (insn_regnum & 0x1e)); + + return *(unsigned long *) &f->regs[insn_regnum]; +} + +static inline unsigned long *fpd_regaddr(struct fpustate *f, + unsigned int insn_regnum) +{ + insn_regnum = (((insn_regnum & 1) << 5) | + (insn_regnum & 0x1e)); + + return (unsigned long *) &f->regs[insn_regnum]; +} + +static inline unsigned int fps_regval(struct fpustate *f, + unsigned int insn_regnum) +{ + return f->regs[insn_regnum]; +} + +static inline unsigned int *fps_regaddr(struct fpustate *f, + unsigned int insn_regnum) +{ + return &f->regs[insn_regnum]; +} + +struct edge_tab { + u16 left, right; +}; +static struct edge_tab edge8_tab[8] = { + { 0xff, 0x80 }, + { 0x7f, 0xc0 }, + { 0x3f, 0xe0 }, + { 0x1f, 0xf0 }, + { 0x0f, 0xf8 }, + { 0x07, 0xfc }, + { 0x03, 0xfe }, + { 0x01, 0xff }, +}; +static struct edge_tab edge8_tab_l[8] = { + { 0xff, 0x01 }, + { 0xfe, 0x03 }, + { 0xfc, 0x07 }, + { 0xf8, 0x0f }, + { 0xf0, 0x1f }, + { 0xe0, 0x3f }, + { 0xc0, 0x7f }, + { 0x80, 0xff }, +}; +static struct edge_tab 
edge16_tab[4] = { + { 0xf, 0x8 }, + { 0x7, 0xc }, + { 0x3, 0xe }, + { 0x1, 0xf }, +}; +static struct edge_tab edge16_tab_l[4] = { + { 0xf, 0x1 }, + { 0xe, 0x3 }, + { 0xc, 0x7 }, + { 0x8, 0xf }, +}; +static struct edge_tab edge32_tab[2] = { + { 0x3, 0x2 }, + { 0x1, 0x3 }, +}; +static struct edge_tab edge32_tab_l[2] = { + { 0x3, 0x1 }, + { 0x2, 0x3 }, +}; + +static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; + u16 left, right; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); + orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); + + if (test_thread_flag(TIF_32BIT)) { + rs1 = rs1 & 0xffffffff; + rs2 = rs2 & 0xffffffff; + } + switch (opf) { + default: + case EDGE8_OPF: + case EDGE8N_OPF: + left = edge8_tab[rs1 & 0x7].left; + right = edge8_tab[rs2 & 0x7].right; + break; + case EDGE8L_OPF: + case EDGE8LN_OPF: + left = edge8_tab_l[rs1 & 0x7].left; + right = edge8_tab_l[rs2 & 0x7].right; + break; + + case EDGE16_OPF: + case EDGE16N_OPF: + left = edge16_tab[(rs1 >> 1) & 0x3].left; + right = edge16_tab[(rs2 >> 1) & 0x3].right; + break; + + case EDGE16L_OPF: + case EDGE16LN_OPF: + left = edge16_tab_l[(rs1 >> 1) & 0x3].left; + right = edge16_tab_l[(rs2 >> 1) & 0x3].right; + break; + + case EDGE32_OPF: + case EDGE32N_OPF: + left = edge32_tab[(rs1 >> 2) & 0x1].left; + right = edge32_tab[(rs2 >> 2) & 0x1].right; + break; + + case EDGE32L_OPF: + case EDGE32LN_OPF: + left = edge32_tab_l[(rs1 >> 2) & 0x1].left; + right = edge32_tab_l[(rs2 >> 2) & 0x1].right; + break; + } + + if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) + rd_val = right & left; + else + rd_val = left; + + store_reg(regs, rd_val, RD(insn)); + + switch (opf) { + case EDGE8_OPF: + case EDGE8L_OPF: + case EDGE16_OPF: + case EDGE16L_OPF: + case EDGE32_OPF: + case EDGE32L_OPF: { + unsigned long ccr, tstate; + + __asm__ __volatile__("subcc %1, %2, %%g0\n\t" + "rd %%ccr, %0" + : "=r" (ccr) + : "r" (orig_rs1), "r" (orig_rs2) + : "cc"); + tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); + regs->tstate = tstate | (ccr << 32UL); + } + } +} + +static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + unsigned long rs1, rs2, rd_val; + unsigned int bits, bits_mask; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + rs1 = fetch_reg(RS1(insn), regs); + rs2 = fetch_reg(RS2(insn), regs); + + bits = (rs2 > 5 ? 
5 : rs2); + bits_mask = (1UL << bits) - 1UL; + + rd_val = ((((rs1 >> 11) & 0x3) << 0) | + (((rs1 >> 33) & 0x3) << 2) | + (((rs1 >> 55) & 0x1) << 4) | + (((rs1 >> 13) & 0xf) << 5) | + (((rs1 >> 35) & 0xf) << 9) | + (((rs1 >> 56) & 0xf) << 13) | + (((rs1 >> 17) & bits_mask) << 17) | + (((rs1 >> 39) & bits_mask) << (17 + bits)) | + (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); + + switch (opf) { + case ARRAY16_OPF: + rd_val <<= 1; + break; + + case ARRAY32_OPF: + rd_val <<= 2; + } + + store_reg(regs, rd_val, RD(insn)); +} + +static void bmask(struct pt_regs *regs, unsigned int insn) +{ + unsigned long rs1, rs2, rd_val, gsr; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + rs1 = fetch_reg(RS1(insn), regs); + rs2 = fetch_reg(RS2(insn), regs); + rd_val = rs1 + rs2; + + store_reg(regs, rd_val, RD(insn)); + + gsr = current_thread_info()->gsr[0] & 0xffffffff; + gsr |= rd_val << 32UL; + current_thread_info()->gsr[0] = gsr; +} + +static void bshuffle(struct pt_regs *regs, unsigned int insn) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val; + unsigned long bmask, i; + + bmask = current_thread_info()->gsr[0] >> 32UL; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0UL; + for (i = 0; i < 8; i++) { + unsigned long which = (bmask >> (i * 4)) & 0xf; + unsigned long byte; + + if (which < 8) + byte = (rs1 >> (which * 8)) & 0xff; + else + byte = (rs2 >> ((which-8)*8)) & 0xff; + rd_val |= (byte << (i * 8)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; +} + +static void pdist(struct pt_regs *regs, unsigned int insn) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, *rd, rd_val; + unsigned long i; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + rd = fpd_regaddr(f, RD(insn)); + + rd_val = *rd; + + for (i = 0; i < 8; i++) { + s16 s1, s2; + + s1 = (rs1 >> (56 - (i * 8))) & 0xff; + s2 = (rs2 >> (56 - (i * 8))) & 0xff; + + /* Absolute value of difference. */ + s1 -= s2; + if (s1 < 0) + s1 = ~s1 + 1; + + rd_val += s1; + } + + *rd = rd_val; +} + +static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, gsr, scale, rd_val; + + gsr = current_thread_info()->gsr[0]; + scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); + switch (opf) { + case FPACK16_OPF: { + unsigned long byte; + + rs2 = fpd_regval(f, RS2(insn)); + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + unsigned int val; + s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; + int scaled = src << scale; + int from_fixed = scaled >> 7; + + val = ((from_fixed < 0) ? + 0 : + (from_fixed > 255) ? + 255 : from_fixed); + + rd_val |= (val << (8 * byte)); + } + *fps_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPACK32_OPF: { + unsigned long word; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); + for (word = 0; word < 2; word++) { + unsigned long val; + s32 src = (rs2 >> (word * 32UL)); + s64 scaled = src << scale; + s64 from_fixed = scaled >> 23; + + val = ((from_fixed < 0) ? + 0 : + (from_fixed > 255) ? + 255 : from_fixed); + + rd_val |= (val << (32 * word)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPACKFIX_OPF: { + unsigned long word; + + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + for (word = 0; word < 2; word++) { + long val; + s32 src = (rs2 >> (word * 32UL)); + s64 scaled = src << scale; + s64 from_fixed = scaled >> 16; + + val = ((from_fixed < -32768) ? 
+ -32768 : + (from_fixed > 32767) ? + 32767 : from_fixed); + + rd_val |= ((val & 0xffff) << (word * 16)); + } + *fps_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FEXPAND_OPF: { + unsigned long byte; + + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + unsigned long val; + u8 src = (rs2 >> (byte * 8)) & 0xff; + + val = src << 4; + + rd_val |= (val << (byte * 16)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPMERGE_OPF: { + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = (((rs2 & 0x000000ff) << 0) | + ((rs1 & 0x000000ff) << 8) | + ((rs2 & 0x0000ff00) << 8) | + ((rs1 & 0x0000ff00) << 16) | + ((rs2 & 0x00ff0000) << 16) | + ((rs1 & 0x00ff0000) << 24) | + ((rs2 & 0xff000000) << 24) | + ((rs1 & 0xff000000) << 32)); + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + } +} + +static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val; + + switch (opf) { + case FMUL8x16_OPF: { + unsigned long byte; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; + s16 src2 = (rs2 >> (byte * 16)) & 0xffff; + u32 prod = src1 * src2; + u16 scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMUL8x16AU_OPF: + case FMUL8x16AL_OPF: { + unsigned long byte; + s16 src2; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0); + for (byte = 0; byte < 4; byte++) { + u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; + u32 prod = src1 * src2; + u16 scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMUL8SUx16_OPF: + case FMUL8ULx16_OPF: { + unsigned long byte, ushift; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; + for (byte = 0; byte < 4; byte++) { + u16 src1; + s16 src2; + u32 prod; + u16 scaled; + + src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); + src2 = ((rs2 >> (16 * byte)) & 0xffff); + prod = src1 * src2; + scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMULD8SUx16_OPF: + case FMULD8ULx16_OPF: { + unsigned long byte, ushift; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; + for (byte = 0; byte < 2; byte++) { + u16 src1; + s16 src2; + u32 prod; + u16 scaled; + + src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); + src2 = ((rs2 >> (16 * byte)) & 0xffff); + prod = src1 * src2; + scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. 
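+ * (Aside, not part of the original file: as in the other partitioned
+ *  multiplies above, this is round-to-nearest on the discarded low
+ *  byte — the result keeps bits 23..8 of the product and bit 7 decides
+ *  the bump.  Worked example with a hypothetical product value:
+ *
+ *	u32 prod = 0x00abcd80;
+ *	u16 scaled = (prod & 0x00ffff00) >> 8;	// 0xabcd
+ *	if (prod & 0x80)			// set here, so...
+ *		scaled++;			// ...0xabce
+ *  )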
*/ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << + ((byte * 32UL) + 7UL)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + } +} + +static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val, i; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + + switch (opf) { + case FCMPGT16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a > b) + rd_val |= 8 >> i; + } + break; + + case FCMPGT32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; + + if (a > b) + rd_val |= 2 >> i; + } + break; + + case FCMPLE16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a <= b) + rd_val |= 8 >> i; + } + break; + + case FCMPLE32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; + + if (a <= b) + rd_val |= 2 >> i; + } + break; + + case FCMPNE16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a != b) + rd_val |= 8 >> i; + } + break; + + case FCMPNE32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; + + if (a != b) + rd_val |= 2 >> i; + } + break; + + case FCMPEQ16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a == b) + rd_val |= 8 >> i; + } + break; + + case FCMPEQ32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; + + if (a == b) + rd_val |= 2 >> i; + } + break; + } + + maybe_flush_windows(0, 0, RD(insn), 0); + store_reg(regs, rd_val, RD(insn)); +} + +/* Emulate the VIS instructions which are not implemented in + * hardware on Niagara. + */ +int vis_emul(struct pt_regs *regs, unsigned int insn) +{ + unsigned long pc = regs->tpc; + unsigned int opf; + + BUG_ON(regs->tstate & TSTATE_PRIV); + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + + if (test_thread_flag(TIF_32BIT)) + pc = (u32)pc; + + if (get_user(insn, (u32 __user *) pc)) + return -EFAULT; + + save_and_clear_fpu(); + + opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; + switch (opf) { + default: + return -EINVAL; + + /* Pixel Formatting Instructions. 
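+ * (Aside, not part of the original file: the decode just above pulls the
+ *  9-bit OPF out of bits 13..5 of the instruction word, matching the
+ *  VIS_OPF_* constants and register-field macros defined earlier:
+ *
+ *	opf = (insn >> 5) & 0x1ff;	// VIS_OPF_MASK >> VIS_OPF_SHIFT
+ *	rs1 = (insn >> 14) & 0x1f;
+ *	rs2 = (insn >> 0) & 0x1f;
+ *	rd  = (insn >> 25) & 0x1f;
+ *  )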
*/ + case FPACK16_OPF: + case FPACK32_OPF: + case FPACKFIX_OPF: + case FEXPAND_OPF: + case FPMERGE_OPF: + pformat(regs, insn, opf); + break; + + /* Partitioned Multiply Instructions */ + case FMUL8x16_OPF: + case FMUL8x16AU_OPF: + case FMUL8x16AL_OPF: + case FMUL8SUx16_OPF: + case FMUL8ULx16_OPF: + case FMULD8SUx16_OPF: + case FMULD8ULx16_OPF: + pmul(regs, insn, opf); + break; + + /* Pixel Compare Instructions */ + case FCMPGT16_OPF: + case FCMPGT32_OPF: + case FCMPLE16_OPF: + case FCMPLE32_OPF: + case FCMPNE16_OPF: + case FCMPNE32_OPF: + case FCMPEQ16_OPF: + case FCMPEQ32_OPF: + pcmp(regs, insn, opf); + break; + + /* Edge Handling Instructions */ + case EDGE8_OPF: + case EDGE8N_OPF: + case EDGE8L_OPF: + case EDGE8LN_OPF: + case EDGE16_OPF: + case EDGE16N_OPF: + case EDGE16L_OPF: + case EDGE16LN_OPF: + case EDGE32_OPF: + case EDGE32N_OPF: + case EDGE32L_OPF: + case EDGE32LN_OPF: + edge(regs, insn, opf); + break; + + /* Pixel Component Distance */ + case PDIST_OPF: + pdist(regs, insn); + break; + + /* Three-Dimensional Array Addressing Instructions */ + case ARRAY8_OPF: + case ARRAY16_OPF: + case ARRAY32_OPF: + array(regs, insn, opf); + break; + + /* Byte Mask and Shuffle Instructions */ + case BMASK_OPF: + bmask(regs, insn); + break; + + case BSHUFFLE_OPF: + bshuffle(regs, insn); + break; + } + + regs->tpc = regs->tnpc; + regs->tnpc += 4; + return 0; +} diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S new file mode 100644 index 000000000..d55ae65a0 --- /dev/null +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* ld script for sparc32/sparc64 kernel */ + +#include + +#include +#include + +#ifdef CONFIG_SPARC32 +#define INITIAL_ADDRESS 0x10000 + SIZEOF_HEADERS +#define TEXTSTART 0xf0004000 + +#define SMP_CACHE_BYTES_SHIFT 5 + +#else +#define SMP_CACHE_BYTES_SHIFT 6 +#define INITIAL_ADDRESS 0x4000 +#define TEXTSTART 0x0000000000404000 + +#endif + +#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) + +#ifdef CONFIG_SPARC32 +OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc") +OUTPUT_ARCH(sparc) +ENTRY(_start) +jiffies = jiffies_64 + 4; +#else +/* sparc64 */ +OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc") +OUTPUT_ARCH(sparc:v9a) +ENTRY(_start) +jiffies = jiffies_64; +#endif + +#ifdef CONFIG_SPARC64 +ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large") +#endif + +SECTIONS +{ +#ifdef CONFIG_SPARC64 + swapper_pg_dir = 0x0000000000402000; +#endif + . = INITIAL_ADDRESS; + .text TEXTSTART : + { + _text = .; + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.gnu.warning) + } = 0 + _etext = .; + + RO_DATA(PAGE_SIZE) + + /* Start of data section */ + _sdata = .; + + .data1 : { + *(.data1) + } + RW_DATA(SMP_CACHE_BYTES, 0, THREAD_SIZE) + + /* End of data section */ + _edata = .; + + .fixup : { + __start___fixup = .; + *(.fixup) + __stop___fixup = .; + } + EXCEPTION_TABLE(16) + + . = ALIGN(PAGE_SIZE); + __init_begin = ALIGN(PAGE_SIZE); + INIT_TEXT_SECTION(PAGE_SIZE) + __init_text_end = .; + INIT_DATA_SECTION(16) + + . 
= ALIGN(4); + .tsb_ldquad_phys_patch : { + __tsb_ldquad_phys_patch = .; + *(.tsb_ldquad_phys_patch) + __tsb_ldquad_phys_patch_end = .; + } + + .tsb_phys_patch : { + __tsb_phys_patch = .; + *(.tsb_phys_patch) + __tsb_phys_patch_end = .; + } + + .cpuid_patch : { + __cpuid_patch = .; + *(.cpuid_patch) + __cpuid_patch_end = .; + } + + .sun4v_1insn_patch : { + __sun4v_1insn_patch = .; + *(.sun4v_1insn_patch) + __sun4v_1insn_patch_end = .; + } + .sun4v_2insn_patch : { + __sun4v_2insn_patch = .; + *(.sun4v_2insn_patch) + __sun4v_2insn_patch_end = .; + } + .leon_1insn_patch : { + __leon_1insn_patch = .; + *(.leon_1insn_patch) + __leon_1insn_patch_end = .; + } + .swapper_tsb_phys_patch : { + __swapper_tsb_phys_patch = .; + *(.swapper_tsb_phys_patch) + __swapper_tsb_phys_patch_end = .; + } + .swapper_4m_tsb_phys_patch : { + __swapper_4m_tsb_phys_patch = .; + *(.swapper_4m_tsb_phys_patch) + __swapper_4m_tsb_phys_patch_end = .; + } + .popc_3insn_patch : { + __popc_3insn_patch = .; + *(.popc_3insn_patch) + __popc_3insn_patch_end = .; + } + .popc_6insn_patch : { + __popc_6insn_patch = .; + *(.popc_6insn_patch) + __popc_6insn_patch_end = .; + } + .pause_3insn_patch : { + __pause_3insn_patch = .; + *(.pause_3insn_patch) + __pause_3insn_patch_end = .; + } + .sun_m7_1insn_patch : { + __sun_m7_1insn_patch = .; + *(.sun_m7_1insn_patch) + __sun_m7_1insn_patch_end = .; + } + .sun_m7_2insn_patch : { + __sun_m7_2insn_patch = .; + *(.sun_m7_2insn_patch) + __sun_m7_2insn_patch_end = .; + } + .get_tick_patch : { + __get_tick_patch = .; + *(.get_tick_patch) + __get_tick_patch_end = .; + } + .pud_huge_patch : { + __pud_huge_patch = .; + *(.pud_huge_patch) + __pud_huge_patch_end = .; + } + .fast_win_ctrl_1insn_patch : { + __fast_win_ctrl_1insn_patch = .; + *(.fast_win_ctrl_1insn_patch) + __fast_win_ctrl_1insn_patch_end = .; + } + PERCPU_SECTION(SMP_CACHE_BYTES) + + . = ALIGN(PAGE_SIZE); + .exit.text : { + EXIT_TEXT + } + + .exit.data : { + EXIT_DATA + } + + . = ALIGN(PAGE_SIZE); + __init_end = .; + BSS_SECTION(0, 0, 0) + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c new file mode 100644 index 000000000..8f20862cc --- /dev/null +++ b/arch/sparc/kernel/windows.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* windows.c: Routines to deal with register window management + * at the C-code level. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "kernel.h" + +/* Do save's until all user register windows are out of the cpu. */ +void flush_user_windows(void) +{ + register int ctr asm("g5"); + + ctr = 0; + __asm__ __volatile__( + "\n1:\n\t" + "ld [%%g6 + %2], %%g4\n\t" + "orcc %%g0, %%g4, %%g0\n\t" + "add %0, 1, %0\n\t" + "bne 1b\n\t" + " save %%sp, -64, %%sp\n" + "2:\n\t" + "subcc %0, 1, %0\n\t" + "bne 2b\n\t" + " restore %%g0, %%g0, %%g0\n" + : "=&r" (ctr) + : "0" (ctr), + "i" ((const unsigned long)TI_UWINMASK) + : "g4", "cc"); +} + +static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *tp) +{ + int i; + + for(i = first_win; i < last_win; i++) { + tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1]; + memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window32)); + } +} + +/* Place as many of the user's current register windows + * on the stack that we can. 
Even if the %sp is unaligned + * we still copy the window there, the only case that we don't + * succeed is if the %sp points to a bum mapping altogether. + * setup_frame() and do_sigreturn() use this before shifting + * the user stack around. Future instruction and hardware + * bug workaround routines will need this functionality as + * well. + */ +void synchronize_user_stack(void) +{ + struct thread_info *tp = current_thread_info(); + int window; + + flush_user_windows(); + if(!tp->w_saved) + return; + + /* Ok, there is some dirty work to do. */ + for(window = tp->w_saved - 1; window >= 0; window--) { + unsigned long sp = tp->rwbuf_stkptrs[window]; + + /* Ok, let it rip. */ + if (copy_to_user((char __user *) sp, &tp->reg_window[window], + sizeof(struct reg_window32))) + continue; + + shift_window_buffer(window, tp->w_saved - 1, tp); + tp->w_saved--; + } +} + +#if 0 +/* An optimization. */ +static inline void copy_aligned_window(void *dest, const void *src) +{ + __asm__ __volatile__("ldd [%1], %%g2\n\t" + "ldd [%1 + 0x8], %%g4\n\t" + "std %%g2, [%0]\n\t" + "std %%g4, [%0 + 0x8]\n\t" + "ldd [%1 + 0x10], %%g2\n\t" + "ldd [%1 + 0x18], %%g4\n\t" + "std %%g2, [%0 + 0x10]\n\t" + "std %%g4, [%0 + 0x18]\n\t" + "ldd [%1 + 0x20], %%g2\n\t" + "ldd [%1 + 0x28], %%g4\n\t" + "std %%g2, [%0 + 0x20]\n\t" + "std %%g4, [%0 + 0x28]\n\t" + "ldd [%1 + 0x30], %%g2\n\t" + "ldd [%1 + 0x38], %%g4\n\t" + "std %%g2, [%0 + 0x30]\n\t" + "std %%g4, [%0 + 0x38]\n\t" : : + "r" (dest), "r" (src) : + "g2", "g3", "g4", "g5"); +} +#endif + +/* Try to push the windows in a threads window buffer to the + * user stack. Unaligned %sp's are not allowed here. + */ + +void try_to_clear_window_buffer(struct pt_regs *regs, int who) +{ + struct thread_info *tp = current_thread_info(); + int window; + + flush_user_windows(); + for(window = 0; window < tp->w_saved; window++) { + unsigned long sp = tp->rwbuf_stkptrs[window]; + + if ((sp & 7) || + copy_to_user((char __user *) sp, &tp->reg_window[window], + sizeof(struct reg_window32))) { + force_exit_sig(SIGILL); + return; + } + } + tp->w_saved = 0; +} diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S new file mode 100644 index 000000000..448accee0 --- /dev/null +++ b/arch/sparc/kernel/winfixup.S @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* winfixup.S: Handle cases where user stack pointer is found to be bogus. + * + * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include + + .text + + /* It used to be the case that these register window fault + * handlers could run via the save and restore instructions + * done by the trap entry and exit code. They now do the + * window spill/fill by hand, so that case no longer can occur. + */ + + .align 32 +fill_fixup: + TRAP_LOAD_THREAD_REG(%g6, %g1) + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + + /* Be very careful about usage of the trap globals here. + * You cannot touch %g5 as that has the fault information. 
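+ *
+ * (Aside, not part of the original file: concretely, on entry to these
+ *  fixup paths %g4 carries the fault code and %g5 the faulting address,
+ *  which is why both paths end up storing them into thread_info:
+ *
+ *	stb	%g4, [%g6 + TI_FAULT_CODE]	! fault code
+ *	stx	%g5, [%g6 + TI_FAULT_ADDR]	! fault address
+ *
+ *  before trapping into C via etrap/do_sparc64_fault.)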
+ */ +spill_fixup: +spill_fixup_mna: +spill_fixup_dax: + TRAP_LOAD_THREAD_REG(%g6, %g1) + ldx [%g6 + TI_FLAGS], %g1 + andcc %sp, 0x1, %g0 + movne %icc, 0, %g1 + andcc %g1, _TIF_32BIT, %g0 + ldub [%g6 + TI_WSAVED], %g1 + sll %g1, 3, %g3 + add %g6, %g3, %g3 + stx %sp, [%g3 + TI_RWIN_SPTRS] + sll %g1, 7, %g3 + bne,pt %xcc, 1f + add %g6, %g3, %g3 + stx %l0, [%g3 + TI_REG_WINDOW + 0x00] + stx %l1, [%g3 + TI_REG_WINDOW + 0x08] + stx %l2, [%g3 + TI_REG_WINDOW + 0x10] + stx %l3, [%g3 + TI_REG_WINDOW + 0x18] + stx %l4, [%g3 + TI_REG_WINDOW + 0x20] + stx %l5, [%g3 + TI_REG_WINDOW + 0x28] + stx %l6, [%g3 + TI_REG_WINDOW + 0x30] + stx %l7, [%g3 + TI_REG_WINDOW + 0x38] + stx %i0, [%g3 + TI_REG_WINDOW + 0x40] + stx %i1, [%g3 + TI_REG_WINDOW + 0x48] + stx %i2, [%g3 + TI_REG_WINDOW + 0x50] + stx %i3, [%g3 + TI_REG_WINDOW + 0x58] + stx %i4, [%g3 + TI_REG_WINDOW + 0x60] + stx %i5, [%g3 + TI_REG_WINDOW + 0x68] + stx %i6, [%g3 + TI_REG_WINDOW + 0x70] + ba,pt %xcc, 2f + stx %i7, [%g3 + TI_REG_WINDOW + 0x78] +1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] + stw %l1, [%g3 + TI_REG_WINDOW + 0x04] + stw %l2, [%g3 + TI_REG_WINDOW + 0x08] + stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] + stw %l4, [%g3 + TI_REG_WINDOW + 0x10] + stw %l5, [%g3 + TI_REG_WINDOW + 0x14] + stw %l6, [%g3 + TI_REG_WINDOW + 0x18] + stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] + stw %i0, [%g3 + TI_REG_WINDOW + 0x20] + stw %i1, [%g3 + TI_REG_WINDOW + 0x24] + stw %i2, [%g3 + TI_REG_WINDOW + 0x28] + stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] + stw %i4, [%g3 + TI_REG_WINDOW + 0x30] + stw %i5, [%g3 + TI_REG_WINDOW + 0x34] + stw %i6, [%g3 + TI_REG_WINDOW + 0x38] + stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] +2: add %g1, 1, %g1 + stb %g1, [%g6 + TI_WSAVED] + rdpr %tstate, %g1 + andcc %g1, TSTATE_PRIV, %g0 + saved + be,pn %xcc, 1f + and %g1, TSTATE_CWP, %g1 + retry +1: mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap + +winfix_mna: + andn %g3, 0x7f, %g3 + add %g3, 0x78, %g3 + wrpr %g3, %tnpc + done + +fill_fixup_mna: + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 + call sun4v_do_mna + mov %l5, %o1 + ba,a,pt %xcc, rtrap +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + nop + ba,a,pt %xcc, rtrap + +winfix_dax: + andn %g3, 0x7f, %g3 + add %g3, 0x74, %g3 + wrpr %g3, %tnpc + done + +fill_fixup_dax: + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap + nop +1: call spitfire_data_access_exception + nop + ba,a,pt %xcc, rtrap + nop diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S new file mode 100644 index 000000000..96a3a1124 --- /dev/null +++ b/arch/sparc/kernel/wof.S @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * wof.S: Sparc window overflow handler. + * + * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* WARNING: This routine is hairy and _very_ complicated, but it + * must be as fast as possible as it handles the allocation + * of register windows to the user and kernel. If you touch + * this code be _very_ careful as many other pieces of the + * kernel depend upon how this code behaves. You have been + * duly warned... + */ + +/* We define macro's for registers which have a fixed + * meaning throughout this entire routine. The 'T' in + * the comments mean that the register can only be + * accessed when in the 'trap' window, 'G' means + * accessible in any window. Do not change these registers + * after they have been set, until you are ready to return + * from the trap. + */ +#define t_psr l0 /* %psr at trap time T */ +#define t_pc l1 /* PC for trap return T */ +#define t_npc l2 /* NPC for trap return T */ +#define t_wim l3 /* %wim at trap time T */ +#define saved_g5 l5 /* Global save register T */ +#define saved_g6 l6 /* Global save register T */ +#define curptr g6 /* Gets set to 'current' then stays G */ + +/* Now registers whose values can change within the handler. */ +#define twin_tmp l4 /* Temp reg, only usable in trap window T */ +#define glob_tmp g5 /* Global temporary reg, usable anywhere G */ + + .text + .align 4 + /* BEGINNING OF PATCH INSTRUCTIONS */ + /* On a 7-window Sparc the boot code patches spnwin_* + * instructions with the following ones. + */ + .globl spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win +spnwin_patch1_7win: sll %t_wim, 6, %glob_tmp +spnwin_patch2_7win: and %glob_tmp, 0x7f, %glob_tmp +spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp + /* END OF PATCH INSTRUCTIONS */ + + /* The trap entry point has done the following: + * + * rd %psr, %l0 + * rd %wim, %l3 + * b spill_window_entry + * andcc %l0, PSR_PS, %g0 + */ + + /* Datum current_thread_info->uwinmask contains at all times a bitmask + * where if any user windows are active, at least one bit will + * be set in to mask. If no user windows are active, the bitmask + * will be all zeroes. + */ + .globl spill_window_entry + .globl spnwin_patch1, spnwin_patch2, spnwin_patch3 +spill_window_entry: + /* LOCATION: Trap Window */ + + mov %g5, %saved_g5 ! save away global temp register + mov %g6, %saved_g6 ! save away 'current' ptr register + + /* Compute what the new %wim will be if we save the + * window properly in this trap handler. + * + * newwim = ((%wim>>1) | (%wim<<(nwindows - 1))); + */ + srl %t_wim, 0x1, %twin_tmp +spnwin_patch1: sll %t_wim, 7, %glob_tmp + or %glob_tmp, %twin_tmp, %glob_tmp +spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp + + /* The trap entry point has set the condition codes + * up for us to see if this is from user or kernel. + * Get the load of 'curptr' out of the way. + */ + LOAD_CURRENT(curptr, twin_tmp) + + andcc %t_psr, PSR_PS, %g0 + be,a spwin_fromuser ! all user wins, branch + save %g0, %g0, %g0 ! Go where saving will occur + + /* See if any user windows are active in the set. */ + ld [%curptr + TI_UWINMASK], %twin_tmp ! grab win mask + orcc %g0, %twin_tmp, %g0 ! check for set bits + bne spwin_exist_uwins ! yep, there are some + andn %twin_tmp, %glob_tmp, %twin_tmp ! compute new uwinmask + + /* Save into the window which must be saved and do it. + * Basically if we are here, this means that we trapped + * from kernel mode with only kernel windows in the register + * file. + */ + save %g0, %g0, %g0 ! 
+
+	/* The trap entry point has set the condition codes
+	 * up for us to see if this is from user or kernel.
+	 * Get the load of 'curptr' out of the way.
+	 */
+	LOAD_CURRENT(curptr, twin_tmp)
+
+	andcc	%t_psr, PSR_PS, %g0
+	be,a	spwin_fromuser				! all user wins, branch
+	 save	%g0, %g0, %g0				! Go where saving will occur
+
+	/* See if any user windows are active in the set. */
+	ld	[%curptr + TI_UWINMASK], %twin_tmp	! grab win mask
+	orcc	%g0, %twin_tmp, %g0			! check for set bits
+	bne	spwin_exist_uwins			! yep, there are some
+	 andn	%twin_tmp, %glob_tmp, %twin_tmp		! compute new uwinmask
+
+	/* Save into the window which must be saved and do it.
+	 * Basically if we are here, this means that we trapped
+	 * from kernel mode with only kernel windows in the register
+	 * file.
+	 */
+	save	%g0, %g0, %g0		! save into the window to stash away
+	wr	%glob_tmp, 0x0, %wim	! set new %wim, this is safe now
+
+spwin_no_userwins_from_kernel:
+	/* LOCATION: Window to be saved */
+
+	STORE_WINDOW(sp)	! stash the window
+	restore	%g0, %g0, %g0	! go back into trap window
+
+	/* LOCATION: Trap window */
+	mov	%saved_g5, %g5		! restore %glob_tmp
+	mov	%saved_g6, %g6		! restore %curptr
+	wr	%t_psr, 0x0, %psr	! restore condition codes in %psr
+	WRITE_PAUSE			! waste some time
+	jmp	%t_pc			! Return from trap
+	rett	%t_npc			! we are done
+
+spwin_exist_uwins:
+	/* LOCATION: Trap window */
+
+	/* Wow, user windows have to be dealt with, this is dirty
+	 * and messy as all hell.  And difficult to follow if you
+	 * are approaching the infamous register window trap handling
+	 * problem for the first time. DON'T LOOK!
+	 *
+	 * Note that, the way the execution path works out, the new
+	 * %wim will be left for us in the global temporary register,
+	 * %glob_tmp.  We cannot set the new %wim first because we
+	 * need to save into the appropriate window without inducing
+	 * a trap (traps are off, we'd get a watchdog wheee)...
+	 * But first, store the new user window mask calculated
+	 * above.
+	 */
+	st	%twin_tmp, [%curptr + TI_UWINMASK]
+	save	%g0, %g0, %g0		! Go to where the saving will occur
+
+spwin_fromuser:
+	/* LOCATION: Window to be saved */
+	wr	%glob_tmp, 0x0, %wim	! Now it is safe to set new %wim
+
+	/* LOCATION: Window to be saved */
+
+	/* This instruction branches to a routine which will check
+	 * the validity of the user's stack pointer by whatever means
+	 * are necessary.  This means that this is architecture
+	 * specific and thus this branch instruction will need to
+	 * be patched at boot time once the machine type is known.
+	 * This routine _shall not_ touch %curptr under any
+	 * circumstances whatsoever!  It will branch back to the
+	 * label 'spwin_good_ustack' if the stack is ok but still
+	 * needs to be dumped (SRMMU for instance will not need to
+	 * do this) or 'spwin_finish_up' if the stack is ok and the
+	 * registers have already been saved.  If the stack is found
+	 * to be bogus for some reason the routine shall branch to
+	 * the label 'spwin_user_stack_is_bolixed' which will take
+	 * care of things at that point.
+	 */
+	b	spwin_srmmu_stackchk
+	 andcc	%sp, 0x7, %g0
+
+spwin_good_ustack:
+	/* LOCATION: Window to be saved */
+
+	/* The user's stack is ok and we can safely store the
+	 * window at %sp.
+	 */
+	STORE_WINDOW(sp)
+
+spwin_finish_up:
+	restore	%g0, %g0, %g0		/* Back to trap window. */
+
+	/* LOCATION: Trap window */
+
+	/* We have spilled successfully, and we have properly stored
+	 * the appropriate window onto the stack.
+	 */
+
+	/* Restore saved globals */
+	mov	%saved_g5, %g5
+	mov	%saved_g6, %g6
+
+	wr	%t_psr, 0x0, %psr
+	WRITE_PAUSE
+	jmp	%t_pc
+	rett	%t_npc
+
+spwin_user_stack_is_bolixed:
+	/* LOCATION: Window to be saved */
+
+	/* Wheee, user has trashed his/her stack.  We have to decide
+	 * how to proceed based upon whether we came from kernel mode
+	 * or not.  If we came from kernel mode, toss the window into
+	 * a special buffer and proceed, the kernel _needs_ a window
+	 * and we could be in an interrupt handler so timing is crucial.
+	 * If we came from user land we build a full stack frame and call
+	 * c-code to gun down the process.
+	 */
+	rd	%psr, %glob_tmp
+	andcc	%glob_tmp, PSR_PS, %g0
+	bne	spwin_bad_ustack_from_kernel
+	 nop
+
+	/* Oh well, throw this one window into the per-task window
+	 * buffer, the first one.
+	 */
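+	/* (Buffer layout used below: the window's stack pointer goes
+	 *  in TI_RWIN_SPTRS, its sixteen registers in TI_REG_WINDOW,
+	 *  and TI_W_SAVED counts the buffered windows; exactly one
+	 *  after this path.)
+	 */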
+	st	%sp, [%curptr + TI_RWIN_SPTRS]
+	STORE_WINDOW(curptr + TI_REG_WINDOW)
+	restore	%g0, %g0, %g0
+
+	/* LOCATION: Trap Window */
+
+	/* Back in the trap window, update winbuffer save count. */
+	mov	1, %twin_tmp
+	st	%twin_tmp, [%curptr + TI_W_SAVED]
+
+	/* Compute new user window mask.  What we are basically
+	 * doing is taking two windows, the invalid one at trap
+	 * time and the one we attempted to throw onto the user's
+	 * stack, and saying that everything else is an ok user
+	 * window.  umask = ((~(%t_wim | %wim)) & valid_wim_bits)
+	 */
+	rd	%wim, %twin_tmp
+	or	%twin_tmp, %t_wim, %twin_tmp
+	not	%twin_tmp
+spnwin_patch3:	and	%twin_tmp, 0xff, %twin_tmp	! patched on 7win Sparcs
+	st	%twin_tmp, [%curptr + TI_UWINMASK]
+
+#define STACK_OFFSET	(THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
+
+	sethi	%hi(STACK_OFFSET), %sp
+	or	%sp, %lo(STACK_OFFSET), %sp
+	add	%curptr, %sp, %sp
+
+	/* Restore the saved globals and build a pt_regs frame. */
+	mov	%saved_g5, %g5
+	mov	%saved_g6, %g6
+	STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+
+	sethi	%hi(STACK_OFFSET), %g6
+	or	%g6, %lo(STACK_OFFSET), %g6
+	sub	%sp, %g6, %g6		! curptr
+
+	/* Turn on traps and call c-code to deal with it. */
+	wr	%t_psr, PSR_ET, %psr
+	nop
+	call	window_overflow_fault
+	 nop
+
+	/* Return from trap if C-code actually fixes things, if it
+	 * doesn't then we never get this far as the process will
+	 * be given the look of death from Commander Peanut.
+	 */
+	b	ret_trap_entry
+	 clr	%l6
+
+spwin_bad_ustack_from_kernel:
+	/* LOCATION: Window to be saved */
+
+	/* The kernel provoked a spill window trap, but the window we
+	 * need to save is a user one and the process has trashed its
+	 * stack pointer.  We need to be quick, so we throw it into
+	 * a per-process window buffer until we can properly handle
+	 * this later on.
+	 */
+	SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
+	restore	%g0, %g0, %g0
+
+	/* LOCATION: Trap window */
+
+	/* Restore globals, condition codes in the %psr and
+	 * return from trap.  Note, restoring %g6 when returning
+	 * to kernel mode is not necessary these days. ;-)
+	 */
+	mov	%saved_g5, %g5
+	mov	%saved_g6, %g6
+
+	wr	%t_psr, 0x0, %psr
+	WRITE_PAUSE
+
+	jmp	%t_pc
+	rett	%t_npc
+
+/* Undefine the register macros which would only cause trouble
+ * if used below.  This helps find 'stupid' coding errors that
+ * produce 'odd' behavior.  The routines below are allowed to
+ * make usage of glob_tmp and t_psr so we leave them defined.
+ */
+#undef twin_tmp
+#undef curptr
+#undef t_pc
+#undef t_npc
+#undef t_wim
+#undef saved_g5
+#undef saved_g6
+
+/* Now come the per-architecture window overflow stack checking routines.
+ * As noted above %curptr cannot be touched by this routine at all.
+ */
+
+	/* This is a generic SRMMU routine.  As far as I know this
+	 * works for all current v8/srmmu implementations, we'll
+	 * see...
+	 */
+	.globl	spwin_srmmu_stackchk
+spwin_srmmu_stackchk:
+	/* LOCATION: Window to be saved on the stack */
+
+	/* Because of SMP concerns and speed we play a trick.
+	 * We disable fault traps in the MMU control register,
+	 * execute the stores, then check the fault registers
+	 * to see what happens.  I can hear Linus now
+	 * "disgusting... broken hardware...".
+	 *
+	 * But first, check to see if the user's stack has ended
+	 * up in kernel vma, in which case we would succeed for the
+	 * 'wrong' reason... ;(  Note that the 'sethi' below assumes
+	 * the kernel is page aligned, which should always be the case.
+	 */
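+	/* The no-fault protocol below, as C-like pseudocode (a sketch;
+	 * 0x2 is the no_fault bit in the MMU control register, and bit
+	 * 0x2 of the SFSR indicates that a fault was taken):
+	 *
+	 *	read(AC_M_SFSR);		// clear stale fault status
+	 *	mmu_ctrl |= 0x2;		// stores now fail silently
+	 *	STORE_WINDOW(sp);		// may fault, we won't trap
+	 *	mmu_ctrl &= ~0x2;
+	 *	read(AC_M_SFAR);		// eat fault address
+	 *	if (read(AC_M_SFSR) & 0x2)	// did a store fault?
+	 *		goto spwin_user_stack_is_bolixed;
+	 */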
+	/* Check results of caller's andcc %sp, 0x7, %g0 */
+	bne	spwin_user_stack_is_bolixed
+	 sethi	%hi(PAGE_OFFSET), %glob_tmp
+	cmp	%glob_tmp, %sp
+	bleu	spwin_user_stack_is_bolixed
+	 mov	AC_M_SFSR, %glob_tmp
+
+	/* Clear the fault status and turn on the no_fault bit. */
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)	! eat SFSR
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)		! eat SFSR
+
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %glob_tmp)	! read MMU control
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %glob_tmp)		! read MMU control
+	or	%glob_tmp, 0x2, %glob_tmp		! or in no_fault bit
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)	! set it
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)		! set it
+
+	/* Dump the registers and cross fingers. */
+	STORE_WINDOW(sp)
+
+	/* Clear the no_fault bit and check the status. */
+	andn	%glob_tmp, 0x2, %glob_tmp
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)
+
+	mov	AC_M_SFAR, %glob_tmp
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)
+
+	mov	AC_M_SFSR, %glob_tmp
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
+	andcc	%glob_tmp, 0x2, %g0			! did we fault?
+	be,a	spwin_finish_up + 0x4			! cool beans, success
+	 restore %g0, %g0, %g0
+
+	rd	%psr, %glob_tmp
+	b	spwin_user_stack_is_bolixed + 0x4	! we faulted, ugh
+	 nop
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
new file mode 100644
index 000000000..1a4ca490e
--- /dev/null
+++ b/arch/sparc/kernel/wuf.S
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * wuf.S: Window underflow trap handler for the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller
+ */
+
+#include <asm/contregs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/smp.h>
+#include <asm/asi.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+#include <asm/thread_info.h>
+
+/* Just like the overflow handler we define macros for registers
+ * with fixed meanings in this routine.
+ */
+#define t_psr     l0
+#define t_pc      l1
+#define t_npc     l2
+#define t_wim     l3
+/* Don't touch the above registers or else you die horribly... */
+
+/* Now macros for the available scratch registers in this routine. */
+#define twin_tmp1 l4
+#define twin_tmp2 l5
+
+#define curptr    g6
+
+	.text
+	.align	4
+
+	/* The trap entry point has executed the following:
+	 *
+	 *	rd	%psr, %l0
+	 *	rd	%wim, %l3
+	 *	b	fill_window_entry
+	 *	andcc	%l0, PSR_PS, %g0
+	 */
+
+	/* Datum current_thread_info->uwinmask contains at all times a bitmask
+	 * where if any user windows are active, at least one bit will
+	 * be set in the mask.  If no user windows are active, the bitmask
+	 * will be all zeroes.
+	 */
+
+	/* To get an idea of what has just happened to cause this
+	 * trap take a look at this diagram:
+	 *
+	 *      1  2  3  4     <--  Window number
+	 *      ----------
+	 *      T  O  W  I     <--  Symbolic name
+	 *
+	 *      O == the window that execution was in when
+	 *           the restore was attempted
+	 *
+	 *      T == the trap itself has save'd us into this
+	 *           window
+	 *
+	 *      W == this window is the one which is now invalid
+	 *           and must be made valid plus loaded from the
+	 *           stack
+	 *
+	 *      I == this window will be the invalid one when we
+	 *           are done and return from trap if successful
+	 */
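+	/* The %wim rotation performed at fill_window_entry below is
+	 * the mirror image of the overflow case:
+	 *
+	 *	newwim = ((%wim << 1) | (%wim >> (nwindows - 1)));
+	 *
+	 * Worked example (a sketch, assuming an unpatched 8-window
+	 * chip): if the invalid bit is 0x08, then
+	 *
+	 *	(0x08 << 1) | (0x08 >> 7) == 0x10
+	 *
+	 * i.e. the invalid marker moves one window, off the window
+	 * 'W' we are about to load from the stack and onto its
+	 * neighbor, which becomes the new invalid window 'I'.
+	 */
+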
+	/* BEGINNING OF PATCH INSTRUCTIONS */
+
+	/* On a 7-window Sparc the boot code patches fnwin_patch1
+	 * and fnwin_patch2 with the following instructions.
+	 */
+	.globl	fnwin_patch1_7win, fnwin_patch2_7win
+fnwin_patch1_7win:	srl	%t_wim, 6, %twin_tmp2
+fnwin_patch2_7win:	and	%twin_tmp1, 0x7f, %twin_tmp1
+	/* END OF PATCH INSTRUCTIONS */
+
+	.globl	fill_window_entry, fnwin_patch1, fnwin_patch2
+fill_window_entry:
+	/* LOCATION: Window 'T' */
+
+	/* Compute what the new %wim is going to be if we retrieve
+	 * the proper window off of the stack.
+	 */
+	sll	%t_wim, 1, %twin_tmp1
+fnwin_patch1:	srl	%t_wim, 7, %twin_tmp2
+	or	%twin_tmp1, %twin_tmp2, %twin_tmp1
+fnwin_patch2:	and	%twin_tmp1, 0xff, %twin_tmp1
+
+	wr	%twin_tmp1, 0x0, %wim	/* Make window 'I' invalid */
+
+	andcc	%t_psr, PSR_PS, %g0
+	be	fwin_from_user
+	 restore	%g0, %g0, %g0	/* Restore to window 'O' */
+
+	/* Trapped from kernel, we trust that the kernel does not
+	 * 'over restore', so to speak, and just grab the window
+	 * from the stack and return.  Easy enough.
+	 */
+fwin_from_kernel:
+	/* LOCATION: Window 'O' */
+
+	restore	%g0, %g0, %g0
+
+	/* LOCATION: Window 'W' */
+
+	LOAD_WINDOW(sp)	/* Load it up */
+
+	/* Spin the wheel... */
+	save	%g0, %g0, %g0
+	save	%g0, %g0, %g0
+	/* I'd like to buy a vowel please... */
+
+	/* LOCATION: Window 'T' */
+
+	/* Now preserve the condition codes in %psr, pause, and
+	 * return from trap.  This is the simplest case of all.
+	 */
+	wr	%t_psr, 0x0, %psr
+	WRITE_PAUSE
+
+	jmp	%t_pc
+	rett	%t_npc
+
+fwin_from_user:
+	/* LOCATION: Window 'O' */
+
+	restore	%g0, %g0, %g0	/* Restore to window 'W' */
+
+	/* LOCATION: Window 'W' */
+
+	/* Branch to the stack validation routine */
+	b	srmmu_fwin_stackchk
+	 andcc	%sp, 0x7, %g0
+
+#define STACK_OFFSET	(THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
+
+fwin_user_stack_is_bolixed:
+	/* LOCATION: Window 'W' */
+
+	/* Place a pt_regs frame on the kernel stack, save back
+	 * to the trap window and call c-code to deal with this.
+	 */
+	LOAD_CURRENT(l4, l5)
+
+	sethi	%hi(STACK_OFFSET), %l5
+	or	%l5, %lo(STACK_OFFSET), %l5
+	add	%l4, %l5, %l5
+
+	/* Store globals into pt_regs frame. */
+	STORE_PT_GLOBALS(l5)
+	STORE_PT_YREG(l5, g3)
+
+	/* Save current in a global while we change windows. */
+	mov	%l4, %curptr
+
+	save	%g0, %g0, %g0
+
+	/* LOCATION: Window 'O' */
+
+	rd	%psr, %g3	/* Read %psr in live user window */
+	mov	%fp, %g4	/* Save bogus frame pointer. */
+
+	save	%g0, %g0, %g0
+
+	/* LOCATION: Window 'T' */
+
+	sethi	%hi(STACK_OFFSET), %l5
+	or	%l5, %lo(STACK_OFFSET), %l5
+	add	%curptr, %l5, %sp
+
+	/* Build rest of pt_regs. */
+	STORE_PT_INS(sp)
+	STORE_PT_PRIV(sp, t_psr, t_pc, t_npc)
+
+	/* re-set trap time %wim value */
+	wr	%t_wim, 0x0, %wim
+
+	/* Fix user's window mask and buffer save count. */
+	mov	0x1, %g5
+	sll	%g5, %g3, %g5
+	st	%g5, [%curptr + TI_UWINMASK]	! one live user window still
+	st	%g0, [%curptr + TI_W_SAVED]	! no windows in the buffer
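+	/* (Note on the mask fixup above: sll shifts by the low five
+	 *  bits of %g3, and %g3 holds the %psr read in the live user
+	 *  window, so %g5 == 1 << CWP.  Exactly one user window is
+	 *  still live: the one the user's restore trapped out of.)
+	 */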
+
+	wr	%t_psr, PSR_ET, %psr		! enable traps
+	nop
+	call	window_underflow_fault
+	 mov	%g4, %o0
+
+	b	ret_trap_entry
+	 clr	%l6
+
+fwin_user_stack_is_ok:
+	/* LOCATION: Window 'W' */
+
+	/* The user's stack area is kosher and mapped, load the
+	 * window and fall through to the finish up routine.
+	 */
+	LOAD_WINDOW(sp)
+
+	/* Round and round she goes... */
+	save	%g0, %g0, %g0		/* Save to window 'O' */
+	save	%g0, %g0, %g0		/* Save to window 'T' */
+	/* Where she'll trap nobody knows... */
+
+	/* LOCATION: Window 'T' */
+
+fwin_user_finish_up:
+	/* LOCATION: Window 'T' */
+
+	wr	%t_psr, 0x0, %psr
+	WRITE_PAUSE
+
+	jmp	%t_pc
+	rett	%t_npc
+
+	/* Here come the architecture specific checks for stack
+	 * mappings.  Note that unlike the window overflow handler
+	 * we only need to check whether the user can read from
+	 * the appropriate addresses.  Also note that we are in
+	 * an invalid window which will be loaded, and this means
+	 * that until we actually load the window up we are free
+	 * to use any of the local registers contained within.
+	 *
+	 * On success these routines branch to fwin_user_stack_is_ok
+	 * if the area at %sp is user readable and the window still
+	 * needs to be loaded, else fwin_user_finish_up if the
+	 * routine has done the loading itself.  On failure (bogus
+	 * user stack) the routine shall branch to the label called
+	 * fwin_user_stack_is_bolixed.
+	 *
+	 * Contrary to the arch-specific window overflow stack
+	 * check routines in wof.S, these routines are free to use
+	 * any of the local registers they want to as this window
+	 * does not belong to anyone at this point, however the
+	 * outs and ins are still verboten as they are possibly part
+	 * of 'someone else's' window.
+	 */
+
+	.globl	srmmu_fwin_stackchk
+srmmu_fwin_stackchk:
+	/* LOCATION: Window 'W' */
+
+	/* Caller did 'andcc %sp, 0x7, %g0' */
+	bne	fwin_user_stack_is_bolixed
+	 sethi	%hi(PAGE_OFFSET), %l5
+
+	/* Check if the user's stack is in kernel vma, in which case
+	 * our trial and error technique below would succeed for
+	 * the 'wrong' reason.
+	 */
+	mov	AC_M_SFSR, %l4
+	cmp	%l5, %sp
+	bleu	fwin_user_stack_is_bolixed
+LEON_PI( lda	[%l4] ASI_LEON_MMUREGS, %g0)	! clear fault status
+SUN_PI_( lda	[%l4] ASI_M_MMUREGS, %g0)	! clear fault status
+
+	/* The technique is, turn off faults on this processor,
+	 * just let the load rip, then check the sfsr to see if
+	 * a fault did occur.  Then we turn on fault traps again
+	 * and branch conditionally based upon what happened.
+	 */
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %l5)	! read mmu-ctrl reg
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %l5)	! read mmu-ctrl reg
+	or	%l5, 0x2, %l5			! turn on no-fault bit
+LEON_PI(sta	%l5, [%g0] ASI_LEON_MMUREGS)	! store it
+SUN_PI_(sta	%l5, [%g0] ASI_M_MMUREGS)	! store it
+
+	/* Cross fingers and go for it. */
+	LOAD_WINDOW(sp)
+
+	/* A penny 'saved'... */
+	save	%g0, %g0, %g0
+	save	%g0, %g0, %g0
+	/* Is a BADTRAP earned... */
+
+	/* LOCATION: Window 'T' */
+
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %twin_tmp1)	! load mmu-ctrl again
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %twin_tmp1)	! load mmu-ctrl again
+	andn	%twin_tmp1, 0x2, %twin_tmp1		! clear no-fault bit
+LEON_PI(sta	%twin_tmp1, [%g0] ASI_LEON_MMUREGS)	! store it
+SUN_PI_(sta	%twin_tmp1, [%g0] ASI_M_MMUREGS)	! store it
+
+	mov	AC_M_SFAR, %twin_tmp2
+LEON_PI(lda	[%twin_tmp2] ASI_LEON_MMUREGS, %g0)	! read fault address
+SUN_PI_(lda	[%twin_tmp2] ASI_M_MMUREGS, %g0)	! read fault address
+
+	mov	AC_M_SFSR, %twin_tmp2
+LEON_PI(lda	[%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2)	! read fault status
+SUN_PI_(lda	[%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2)		! read fault status
+	andcc	%twin_tmp2, 0x2, %g0			! did fault occur?
+
+	bne	1f					! yep, cleanup
+	 nop
+
+	wr	%t_psr, 0x0, %psr
+	nop
+	b	fwin_user_finish_up + 0x4
+	 nop
+
+	/* Did I ever tell you about my window lobotomy?
+	 * anyways... fwin_user_stack_is_bolixed expects
+	 * to be in window 'W' so make it happy or else
+	 * we watchdog badly.
+	 */
+1:
+	restore	%g0, %g0, %g0
+	b	fwin_user_stack_is_bolixed	! oh well
+	 restore	%g0, %g0, %g0
-- 
cgit v1.2.3