author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit     76cb841cb886eef6b3bee341a2266c76578724ad
tree       f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/openrisc
parent     Initial commit.
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/openrisc')
95 files changed, 11874 insertions, 0 deletions
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig new file mode 100644 index 000000000..e0081e734 --- /dev/null +++ b/arch/openrisc/Kconfig @@ -0,0 +1,201 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +config OPENRISC + def_bool y + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select DMA_NONCOHERENT_OPS + select OF + select OF_EARLY_FLATTREE + select IRQ_DOMAIN + select HANDLE_DOMAIN_IRQ + select HAVE_MEMBLOCK + select GPIOLIB + select HAVE_ARCH_TRACEHOOK + select SPARSE_IRQ + select GENERIC_IRQ_CHIP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_IOMAP + select GENERIC_CPU_DEVICES + select HAVE_UID16 + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_SMP_IDLE_THREAD + select MODULES_USE_ELF_RELA + select HAVE_DEBUG_STACKOVERFLOW + select OR1K_PIC + select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 + select NO_BOOTMEM + select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_USE_QUEUED_RWLOCKS + select OMPIC if SMP + select ARCH_WANT_FRAME_POINTERS + select GENERIC_IRQ_MULTI_HANDLER + +config CPU_BIG_ENDIAN + def_bool y + +config MMU + def_bool y + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config RWSEM_XCHGADD_ALGORITHM + def_bool n + +config GENERIC_HWEIGHT + def_bool y + +config NO_IOPORT_MAP + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +# For now, use generic checksum functions +#These can be reimplemented in assembly later if so inclined +config GENERIC_CSUM + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +menu "Processor type and features" + +choice + prompt "Subarchitecture" + default OR1K_1200 + +config OR1K_1200 + bool "OR1200" + help + Generic OpenRISC 1200 architecture + +endchoice + +config DCACHE_WRITETHROUGH + bool "Have write through data caches" + default n + help + Select this if your implementation features write through data caches. + Selecting 'N' here will allow the kernel to force flushing of data + caches at relevant times. Most OpenRISC implementations support write- + through data caches. + + If unsure say N here + +config OPENRISC_BUILTIN_DTB + string "Builtin DTB" + default "" + +menu "Class II Instructions" + +config OPENRISC_HAVE_INST_FF1 + bool "Have instruction l.ff1" + default y + help + Select this if your implementation has the Class II instruction l.ff1 + +config OPENRISC_HAVE_INST_FL1 + bool "Have instruction l.fl1" + default y + help + Select this if your implementation has the Class II instruction l.fl1 + +config OPENRISC_HAVE_INST_MUL + bool "Have instruction l.mul for hardware multiply" + default y + help + Select this if your implementation has a hardware multiply instruction + +config OPENRISC_HAVE_INST_DIV + bool "Have instruction l.div for hardware divide" + default y + help + Select this if your implementation has a hardware divide instruction +endmenu + +config NR_CPUS + int "Maximum number of CPUs (2-32)" + range 2 32 + depends on SMP + default "2" + +config SMP + bool "Symmetric Multi-Processing support" + help + This enables support for systems with more than one CPU. If you have + a system with only one CPU, say N. If you have a system with more + than one CPU, say Y. + + If you don't know what to do here, say N. 
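+# Note how the options above are consumed elsewhere in this patch: the
+# OPENRISC_HAVE_INST_MUL/DIV selections switch the compiler between
+# -mhard-mul/-msoft-mul and -mhard-div/-msoft-div in arch/openrisc/Makefile,
+# and DCACHE_WRITETHROUGH lets sync_icache_dcache() in
+# arch/openrisc/include/asm/cacheflush.h skip the explicit data-cache flush.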
+ +source kernel/Kconfig.hz + +config OPENRISC_NO_SPR_SR_DSX + bool "use SPR_SR_DSX software emulation" if OR1K_1200 + default y + help + SPR_SR_DSX bit is status register bit indicating whether + the last exception has happened in delay slot. + + OpenRISC architecture makes it optional to have it implemented + in hardware and the OR1200 does not have it. + + Say N here if you know that your OpenRISC processor has + SPR_SR_DSX bit implemented. Say Y if you are unsure. + +config OPENRISC_HAVE_SHADOW_GPRS + bool "Support for shadow gpr files" if !SMP + default y if SMP + help + Say Y here if your OpenRISC processor features shadowed + register files. They will in such case be used as a + scratch reg storage on exception entry. + + On SMP systems, this feature is mandatory. + On a unicore system it's safe to say N here if you are unsure. + +config CMDLINE + string "Default kernel command string" + default "" + help + On some architectures there is currently no way for the boot loader + to pass arguments to the kernel. For these architectures, you should + supply some command-line options at build time by entering them + here. + +menu "Debugging options" + +config JUMP_UPON_UNHANDLED_EXCEPTION + bool "Try to die gracefully" + default y + help + Now this puts kernel into infinite loop after first oops. Till + your kernel crashes this doesn't have any influence. + + Say Y if you are unsure. + +config OPENRISC_ESR_EXCEPTION_BUG_CHECK + bool "Check for possible ESR exception bug" + default n + help + This option enables some checks that might expose some problems + in kernel. + + Say N if you are unsure. + +endmenu + +endmenu diff --git a/arch/openrisc/Kconfig.debug b/arch/openrisc/Kconfig.debug new file mode 100644 index 000000000..22a162cd9 --- /dev/null +++ b/arch/openrisc/Kconfig.debug @@ -0,0 +1 @@ +# dummy file, do not delete diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile new file mode 100644 index 000000000..70e06d340 --- /dev/null +++ b/arch/openrisc/Makefile @@ -0,0 +1,54 @@ +# BK Id: %F% %I% %G% %U% %#% +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. Remember to do have actions +# for "archclean" and "archdep" for cleaning up and making dependencies for +# this architecture +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# +# Copyright (C) 1994 by Linus Torvalds +# Modifications for the OpenRISC architecture: +# Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> +# Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> +# +# Based on: +# arch/i386/Makefile + +KBUILD_DEFCONFIG := or1ksim_defconfig + +OBJCOPYFLAGS := -O binary -R .note -R .comment -S +LDFLAGS_vmlinux := +LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) + +KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) + KBUILD_CFLAGS += $(call cc-option,-mhard-mul) +else + KBUILD_CFLAGS += $(call cc-option,-msoft-mul) +endif + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_DIV),y) + KBUILD_CFLAGS += $(call cc-option,-mhard-div) +else + KBUILD_CFLAGS += $(call cc-option,-msoft-div) +endif + +head-y := arch/openrisc/kernel/head.o + +core-y += arch/openrisc/lib/ \ + arch/openrisc/kernel/ \ + arch/openrisc/mm/ +libs-y += $(LIBGCC) + +ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""' +BUILTIN_DTB := y +else +BUILTIN_DTB := n +endif +core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ + +all: vmlinux diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile new file mode 100644 index 000000000..17dd791a8 --- /dev/null +++ b/arch/openrisc/boot/dts/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""' +BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o +else +BUILTIN_DTB := +endif +obj-y += $(BUILTIN_DTB) + +#DTC_FLAGS ?= -p 1024 diff --git a/arch/openrisc/boot/dts/or1ksim.dts b/arch/openrisc/boot/dts/or1ksim.dts new file mode 100644 index 000000000..d8aa8309c --- /dev/null +++ b/arch/openrisc/boot/dts/or1ksim.dts @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/ { + compatible = "opencores,or1ksim"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&pic>; + + aliases { + uart0 = &serial0; + }; + + chosen { + bootargs = "earlycon"; + stdout-path = "uart0:115200"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x02000000>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + compatible = "opencores,or1200-rtlsvn481"; + reg = <0>; + clock-frequency = <20000000>; + }; + }; + + /* + * OR1K PIC is built into CPU and accessed via special purpose + * registers. It is not addressable and, hence, has no 'reg' + * property. 
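+	 *
+	 * Addressable peripherals, by contrast, carry a 'reg' property of
+	 * <base size> cells and route their interrupt line back to this
+	 * controller with a single cell, e.g. the UART below with
+	 * reg = <0x90000000 0x100> and interrupts = <2>.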
+ */ + pic: pic { + compatible = "opencores,or1k-pic"; + #interrupt-cells = <1>; + interrupt-controller; + }; + + serial0: serial@90000000 { + compatible = "opencores,uart16550-rtlsvn105", "ns16550a"; + reg = <0x90000000 0x100>; + interrupts = <2>; + clock-frequency = <20000000>; + }; + + enet0: ethoc@92000000 { + compatible = "opencores,ethmac-rtlsvn338"; + reg = <0x92000000 0x100>; + interrupts = <4>; + }; +}; diff --git a/arch/openrisc/boot/dts/simple_smp.dts b/arch/openrisc/boot/dts/simple_smp.dts new file mode 100644 index 000000000..defbb9271 --- /dev/null +++ b/arch/openrisc/boot/dts/simple_smp.dts @@ -0,0 +1,63 @@ +/dts-v1/; +/ { + compatible = "opencores,or1ksim"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&pic>; + + aliases { + uart0 = &serial0; + }; + + chosen { + bootargs = "earlycon"; + stdout-path = "uart0:115200"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x02000000>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + compatible = "opencores,or1200-rtlsvn481"; + reg = <0>; + clock-frequency = <20000000>; + }; + cpu@1 { + compatible = "opencores,or1200-rtlsvn481"; + reg = <1>; + clock-frequency = <20000000>; + }; + }; + + ompic: ompic@98000000 { + compatible = "openrisc,ompic"; + reg = <0x98000000 16>; + interrupt-controller; + #interrupt-cells = <0>; + interrupts = <1>; + }; + + /* + * OR1K PIC is built into CPU and accessed via special purpose + * registers. It is not addressable and, hence, has no 'reg' + * property. + */ + pic: pic { + compatible = "opencores,or1k-pic-level"; + #interrupt-cells = <1>; + interrupt-controller; + }; + + serial0: serial@90000000 { + compatible = "opencores,uart16550-rtlsvn105", "ns16550a"; + reg = <0x90000000 0x100>; + interrupts = <2>; + clock-frequency = <20000000>; + }; + +}; diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig new file mode 100644 index 000000000..a73aa9050 --- /dev/null +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -0,0 +1,58 @@ +CONFIG_CROSS_COMPILE="or1k-linux-" +CONFIG_NO_HZ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_GZIP is not set +CONFIG_EXPERT=y +# CONFIG_KALLSYMS is not set +# CONFIG_EPOLL is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +# CONFIG_AIO is not set +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLOB=y +CONFIG_MODULES=y +# CONFIG_BLOCK is not set +CONFIG_OPENRISC_BUILTIN_DTB="or1ksim" +CONFIG_HZ_100=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_PROC_DEVICETREE=y +CONFIG_NETDEVICES=y +CONFIG_ETHOC=y +CONFIG_MICREL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set 
+CONFIG_TMPFS=y +CONFIG_NFS_FS=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig new file mode 100644 index 000000000..b6e3c7e15 --- /dev/null +++ b/arch/openrisc/configs/simple_smp_defconfig @@ -0,0 +1,66 @@ +CONFIG_CROSS_COMPILE="or1k-linux-" +CONFIG_LOCALVERSION="-simple-smp" +CONFIG_NO_HZ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_GZIP is not set +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_EXPERT=y +# CONFIG_KALLSYMS is not set +# CONFIG_EPOLL is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +# CONFIG_AIO is not set +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLOB=y +CONFIG_MODULES=y +# CONFIG_BLOCK is not set +CONFIG_OPENRISC_BUILTIN_DTB="simple_smp" +CONFIG_SMP=y +CONFIG_HZ_100=y +CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_NETDEVICES=y +CONFIG_ETHOC=y +CONFIG_MICREL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_XZ_DEC=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_RCU_TRACE is not set diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild new file mode 100644 index 000000000..eb87cd832 --- /dev/null +++ b/arch/openrisc/include/asm/Kbuild @@ -0,0 +1,44 @@ +generic-y += barrier.h +generic-y += bug.h +generic-y += bugs.h +generic-y += checksum.h +generic-y += compat.h +generic-y += current.h +generic-y += device.h +generic-y += div64.h +generic-y += dma.h +generic-y += dma-mapping.h +generic-y += emergency-restart.h +generic-y += exec.h +generic-y += extable.h +generic-y += fb.h +generic-y += ftrace.h +generic-y += hardirq.h +generic-y += hw_irq.h +generic-y += irq.h +generic-y += irq_regs.h +generic-y += irq_work.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kprobes.h +generic-y += local.h +generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h +generic-y += module.h +generic-y += pci.h +generic-y += percpu.h +generic-y += preempt.h +generic-y += qspinlock_types.h +generic-y += qspinlock.h +generic-y += qrwlock_types.h +generic-y += qrwlock.h +generic-y += sections.h +generic-y += segment.h +generic-y += string.h +generic-y += switch_to.h +generic-y += topology.h +generic-y += trace_clock.h +generic-y += user.h +generic-y += vga.h +generic-y += word-at-a-time.h +generic-y += xor.h diff --git a/arch/openrisc/include/asm/asm-offsets.h b/arch/openrisc/include/asm/asm-offsets.h new file mode 
100644 index 000000000..d370ee36a --- /dev/null +++ b/arch/openrisc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h new file mode 100644 index 000000000..b589fac39 --- /dev/null +++ b/arch/openrisc/include/asm/atomic.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __ASM_OPENRISC_ATOMIC_H +#define __ASM_OPENRISC_ATOMIC_H + +#include <linux/types.h> + +/* Atomically perform op with v->counter and i */ +#define ATOMIC_OP(op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int tmp; \ + \ + __asm__ __volatile__( \ + "1: l.lwa %0,0(%1) \n" \ + " l." #op " %0,%0,%2 \n" \ + " l.swa 0(%1),%0 \n" \ + " l.bnf 1b \n" \ + " l.nop \n" \ + : "=&r"(tmp) \ + : "r"(&v->counter), "r"(i) \ + : "cc", "memory"); \ +} + +/* Atomically perform op with v->counter and i, return the result */ +#define ATOMIC_OP_RETURN(op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int tmp; \ + \ + __asm__ __volatile__( \ + "1: l.lwa %0,0(%1) \n" \ + " l." #op " %0,%0,%2 \n" \ + " l.swa 0(%1),%0 \n" \ + " l.bnf 1b \n" \ + " l.nop \n" \ + : "=&r"(tmp) \ + : "r"(&v->counter), "r"(i) \ + : "cc", "memory"); \ + \ + return tmp; \ +} + +/* Atomically perform op with v->counter and i, return orig v->counter */ +#define ATOMIC_FETCH_OP(op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int tmp, old; \ + \ + __asm__ __volatile__( \ + "1: l.lwa %0,0(%2) \n" \ + " l." #op " %1,%0,%3 \n" \ + " l.swa 0(%2),%1 \n" \ + " l.bnf 1b \n" \ + " l.nop \n" \ + : "=&r"(old), "=&r"(tmp) \ + : "r"(&v->counter), "r"(i) \ + : "cc", "memory"); \ + \ + return old; \ +} + +ATOMIC_OP_RETURN(add) +ATOMIC_OP_RETURN(sub) + +ATOMIC_FETCH_OP(add) +ATOMIC_FETCH_OP(sub) +ATOMIC_FETCH_OP(and) +ATOMIC_FETCH_OP(or) +ATOMIC_FETCH_OP(xor) + +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define atomic_add_return atomic_add_return +#define atomic_sub_return atomic_sub_return +#define atomic_fetch_add atomic_fetch_add +#define atomic_fetch_sub atomic_fetch_sub +#define atomic_fetch_and atomic_fetch_and +#define atomic_fetch_or atomic_fetch_or +#define atomic_fetch_xor atomic_fetch_xor +#define atomic_and atomic_and +#define atomic_or atomic_or +#define atomic_xor atomic_xor + +/* + * Atomically add a to v->counter as long as v is not already u. + * Returns the original value at v->counter. 
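+ *
+ * For example, with v->counter == 3, a == 1 and u == 42, the compare
+ * fails, the l.swa goes ahead (counter becomes 4) and 3 is returned;
+ * with v->counter == 42 the l.sfeq/l.bf pair skips the store and 42
+ * comes back unchanged.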
+ * + * This is often used through atomic_inc_not_zero() + */ +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, tmp; + + __asm__ __volatile__( + "1: l.lwa %0, 0(%2) \n" + " l.sfeq %0, %4 \n" + " l.bf 2f \n" + " l.add %1, %0, %3 \n" + " l.swa 0(%2), %1 \n" + " l.bnf 1b \n" + " l.nop \n" + "2: \n" + : "=&r"(old), "=&r" (tmp) + : "r"(&v->counter), "r"(a), "r"(u) + : "cc", "memory"); + + return old; +} +#define atomic_fetch_add_unless atomic_fetch_add_unless + +#include <asm-generic/atomic.h> + +#endif /* __ASM_OPENRISC_ATOMIC_H */ diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h new file mode 100644 index 000000000..753829472 --- /dev/null +++ b/arch/openrisc/include/asm/barrier.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#define mb() asm volatile ("l.msync" ::: "memory") + +#include <asm-generic/barrier.h> + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h new file mode 100644 index 000000000..689f56819 --- /dev/null +++ b/arch/openrisc/include/asm/bitops.h @@ -0,0 +1,53 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_BITOPS_H +#define __ASM_OPENRISC_BITOPS_H + +/* + * Where we haven't written assembly versions yet, we fall back to the + * generic implementations. Otherwise, we pull in our (hopefully) + * optimized versions. + */ + +#include <linux/irqflags.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +#include <asm/bitops/__ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm/bitops/fls.h> +#include <asm/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm-generic/bitops/sched.h> +#include <asm/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/lock.h> + +#include <asm/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* __ASM_GENERIC_BITOPS_H */ diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h new file mode 100644 index 000000000..6c8368a34 --- /dev/null +++ b/arch/openrisc/include/asm/bitops/__ffs.h @@ -0,0 +1,33 @@ +/* + * OpenRISC Linux + * + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
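+ *
+ * Note that l.ff1 yields the 1-based index of the least significant
+ * set bit (l.ff1 of 0x8 gives 4), while the kernel's __ffs() is
+ * 0-based, hence the "return ret-1" below.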
+ */ + +#ifndef __ASM_OPENRISC___FFS_H +#define __ASM_OPENRISC___FFS_H + + +#ifdef CONFIG_OPENRISC_HAVE_INST_FF1 + +static inline unsigned long __ffs(unsigned long x) +{ + int ret; + + __asm__ ("l.ff1 %0,%1" + : "=r" (ret) + : "r" (x)); + + return ret-1; +} + +#else +#include <asm-generic/bitops/__ffs.h> +#endif + +#endif /* __ASM_OPENRISC___FFS_H */ diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h new file mode 100644 index 000000000..c4ecdb4c5 --- /dev/null +++ b/arch/openrisc/include/asm/bitops/__fls.h @@ -0,0 +1,33 @@ +/* + * OpenRISC Linux + * + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC___FLS_H +#define __ASM_OPENRISC___FLS_H + + +#ifdef CONFIG_OPENRISC_HAVE_INST_FL1 + +static inline unsigned long __fls(unsigned long x) +{ + int ret; + + __asm__ ("l.fl1 %0,%1" + : "=r" (ret) + : "r" (x)); + + return ret-1; +} + +#else +#include <asm-generic/bitops/__fls.h> +#endif + +#endif /* __ASM_OPENRISC___FLS_H */ diff --git a/arch/openrisc/include/asm/bitops/atomic.h b/arch/openrisc/include/asm/bitops/atomic.h new file mode 100644 index 000000000..35fb85f61 --- /dev/null +++ b/arch/openrisc/include/asm/bitops/atomic.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
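+ *
+ * Every operation below follows the same load-linked/store-conditional
+ * pattern: l.lwa loads the word and takes out a reservation, l.swa
+ * only succeeds (setting the flag) while that reservation still holds,
+ * and l.bnf loops back to retry after a failed store.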
+ */ + +#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H +#define __ASM_OPENRISC_BITOPS_ATOMIC_H + +static inline void set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%1) \n" + " l.or %0,%0,%2 \n" + " l.swa 0(%1),%0 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(tmp) + : "r"(p), "r"(mask) + : "cc", "memory"); +} + +static inline void clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%1) \n" + " l.and %0,%0,%2 \n" + " l.swa 0(%1),%0 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(tmp) + : "r"(p), "r"(~mask) + : "cc", "memory"); +} + +static inline void change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%1) \n" + " l.xor %0,%0,%2 \n" + " l.swa 0(%1),%0 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(tmp) + : "r"(p), "r"(mask) + : "cc", "memory"); +} + +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%2) \n" + " l.or %1,%0,%3 \n" + " l.swa 0(%2),%1 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(old), "=&r"(tmp) + : "r"(p), "r"(mask) + : "cc", "memory"); + + return (old & mask) != 0; +} + +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%2) \n" + " l.and %1,%0,%3 \n" + " l.swa 0(%2),%1 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(old), "=&r"(tmp) + : "r"(p), "r"(~mask) + : "cc", "memory"); + + return (old & mask) != 0; +} + +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long tmp; + + __asm__ __volatile__( + "1: l.lwa %0,0(%2) \n" + " l.xor %1,%0,%3 \n" + " l.swa 0(%2),%1 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(old), "=&r"(tmp) + : "r"(p), "r"(mask) + : "cc", "memory"); + + return (old & mask) != 0; +} + +#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */ diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h new file mode 100644 index 000000000..9de46246e --- /dev/null +++ b/arch/openrisc/include/asm/bitops/ffs.h @@ -0,0 +1,32 @@ +/* + * OpenRISC Linux + * + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
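+ *
+ * Unlike __ffs(), ffs() uses the 1-based convention and returns 0 when
+ * no bit is set, which is exactly what l.ff1 produces, so no
+ * adjustment of the result is needed here.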
+ */ + +#ifndef __ASM_OPENRISC_FFS_H +#define __ASM_OPENRISC_FFS_H + +#ifdef CONFIG_OPENRISC_HAVE_INST_FF1 + +static inline int ffs(int x) +{ + int ret; + + __asm__ ("l.ff1 %0,%1" + : "=r" (ret) + : "r" (x)); + + return ret; +} + +#else +#include <asm-generic/bitops/ffs.h> +#endif + +#endif /* __ASM_OPENRISC_FFS_H */ diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h new file mode 100644 index 000000000..9efbf9ad8 --- /dev/null +++ b/arch/openrisc/include/asm/bitops/fls.h @@ -0,0 +1,33 @@ +/* + * OpenRISC Linux + * + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_FLS_H +#define __ASM_OPENRISC_FLS_H + + +#ifdef CONFIG_OPENRISC_HAVE_INST_FL1 + +static inline int fls(int x) +{ + int ret; + + __asm__ ("l.fl1 %0,%1" + : "=r" (ret) + : "r" (x)); + + return ret; +} + +#else +#include <asm-generic/bitops/fls.h> +#endif + +#endif /* __ASM_OPENRISC_FLS_H */ diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h new file mode 100644 index 000000000..5f55da9cb --- /dev/null +++ b/arch/openrisc/include/asm/cache.h @@ -0,0 +1,31 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_CACHE_H +#define __ASM_OPENRISC_CACHE_H + +/* FIXME: How can we replace these with values from the CPU... + * they shouldn't be hard-coded! + */ + +#define __ro_after_init __read_mostly + +#define L1_CACHE_BYTES 16 +#define L1_CACHE_SHIFT 4 + +#endif /* __ASM_OPENRISC_CACHE_H */ diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h new file mode 100644 index 000000000..70f46fd7a --- /dev/null +++ b/arch/openrisc/include/asm/cacheflush.h @@ -0,0 +1,96 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_CACHEFLUSH_H +#define __ASM_CACHEFLUSH_H + +#include <linux/mm.h> + +/* + * Helper function for flushing or invalidating entire pages from data + * and instruction caches. SMP needs a little extra work, since we need + * to flush the pages on all cpus. + */ +extern void local_dcache_page_flush(struct page *page); +extern void local_icache_page_inv(struct page *page); + +/* + * Data cache flushing always happen on the local cpu. 
Instruction cache + * invalidations need to be broadcasted to all other cpu in the system in + * case of SMP configurations. + */ +#ifndef CONFIG_SMP +#define dcache_page_flush(page) local_dcache_page_flush(page) +#define icache_page_inv(page) local_icache_page_inv(page) +#else /* CONFIG_SMP */ +#define dcache_page_flush(page) local_dcache_page_flush(page) +#define icache_page_inv(page) smp_icache_page_inv(page) +extern void smp_icache_page_inv(struct page *page); +#endif /* CONFIG_SMP */ + +/* + * Synchronizes caches. Whenever a cpu writes executable code to memory, this + * should be called to make sure the processor sees the newly written code. + */ +static inline void sync_icache_dcache(struct page *page) +{ + if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH)) + dcache_page_flush(page); + icache_page_inv(page); +} + +/* + * Pages with this bit set need not be flushed/invalidated, since + * they have not changed since last flush. New pages start with + * PG_arch_1 not set and are therefore dirty by default. + */ +#define PG_dc_clean PG_arch_1 + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +static inline void flush_dcache_page(struct page *page) +{ + clear_bit(PG_dc_clean, &page->flags); +} + +/* + * Other interfaces are not required since we do not have virtually + * indexed or tagged caches. So we can use the default here. + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_range(start, end) do { } while (0) +#define flush_icache_page(vma, pg) do { } while (0) +#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + if (vma->vm_flags & VM_EXEC) \ + sync_icache_dcache(page); \ + } while (0) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ASM_CACHEFLUSH_H */ diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h new file mode 100644 index 000000000..f9cd43a39 --- /dev/null +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -0,0 +1,171 @@ +/* + * 1,2 and 4 byte cmpxchg and xchg implementations for OpenRISC. + * + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + * Note: + * The portable implementations of 1 and 2 byte xchg and cmpxchg using a 4 + * byte cmpxchg is sourced heavily from the sh and mips implementations. 
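+ *
+ * As a worked example of the masking below: a 1-byte cmpxchg at
+ * offset 3 within its word on this big-endian core gives
+ * bitoff = (4 - 1 - 3) * 8 = 0 and bitmask = 0xff, so the byte is
+ * extracted from a 32-bit load, compared, and spliced back in through
+ * cmpxchg_u32().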
+ */ + +#ifndef __ASM_OPENRISC_CMPXCHG_H +#define __ASM_OPENRISC_CMPXCHG_H + +#include <linux/bits.h> +#include <linux/compiler.h> +#include <linux/types.h> + +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long cmpxchg_u32(volatile void *ptr, + unsigned long old, unsigned long new) +{ + __asm__ __volatile__( + "1: l.lwa %0, 0(%1) \n" + " l.sfeq %0, %2 \n" + " l.bnf 2f \n" + " l.nop \n" + " l.swa 0(%1), %3 \n" + " l.bnf 1b \n" + " l.nop \n" + "2: \n" + : "=&r"(old) + : "r"(ptr), "r"(old), "r"(new) + : "cc", "memory"); + + return old; +} + +static inline unsigned long xchg_u32(volatile void *ptr, + unsigned long val) +{ + __asm__ __volatile__( + "1: l.lwa %0, 0(%1) \n" + " l.swa 0(%1), %2 \n" + " l.bnf 1b \n" + " l.nop \n" + : "=&r"(val) + : "r"(ptr), "r"(val) + : "cc", "memory"); + + return val; +} + +static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new, + int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 load32, old32, new32; + u32 ret; + + load32 = READ_ONCE(*p); + + while (true) { + ret = (load32 & bitmask) >> bitoff; + if (old != ret) + return ret; + + old32 = (load32 & ~bitmask) | (old << bitoff); + new32 = (load32 & ~bitmask) | (new << bitoff); + + /* Do 32 bit cmpxchg */ + load32 = cmpxchg_u32(p, old32, new32); + if (load32 == old32) + return old; + } +} + +/* xchg */ + +static inline u32 xchg_small(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). + */ +extern unsigned long __cmpxchg_called_with_bad_pointer(void) + __compiletime_error("Bad argument size for cmpxchg"); + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + case 2: + return cmpxchg_small(ptr, old, new, size); + case 4: + return cmpxchg_u32(ptr, old, new); + default: + return __cmpxchg_called_with_bad_pointer(); + } +} + +#define cmpxchg(ptr, o, n) \ + ({ \ + (__typeof__(*(ptr))) __cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr))); \ + }) + +/* + * This function doesn't exist, so you'll get a linker error if + * something tries to do an invalidly-sized xchg(). 
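+ *
+ * (An xchg() on an 8-byte object, for instance, falls through to the
+ * default case and the build fails: at compile time where
+ * __compiletime_error() is supported, otherwise at link time.)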
+ */ +extern unsigned long __xchg_called_with_bad_pointer(void) + __compiletime_error("Bad argument size for xchg"); + +static inline unsigned long __xchg(volatile void *ptr, unsigned long with, + int size) +{ + switch (size) { + case 1: + case 2: + return xchg_small(ptr, with, size); + case 4: + return xchg_u32(ptr, with); + default: + return __xchg_called_with_bad_pointer(); + } +} + +#define xchg(ptr, with) \ + ({ \ + (__typeof__(*(ptr))) __xchg((ptr), \ + (unsigned long)(with), \ + sizeof(*(ptr))); \ + }) + +#endif /* __ASM_OPENRISC_CMPXCHG_H */ diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h new file mode 100644 index 000000000..4ea0a33eb --- /dev/null +++ b/arch/openrisc/include/asm/cpuinfo.h @@ -0,0 +1,39 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_CPUINFO_H +#define __ASM_OPENRISC_CPUINFO_H + +struct cpuinfo_or1k { + u32 clock_frequency; + + u32 icache_size; + u32 icache_block_size; + u32 icache_ways; + + u32 dcache_size; + u32 dcache_block_size; + u32 dcache_ways; + + u16 coreid; +}; + +extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; +extern void setup_cpuinfo(void); + +#endif /* __ASM_OPENRISC_CPUINFO_H */ diff --git a/arch/openrisc/include/asm/delay.h b/arch/openrisc/include/asm/delay.h new file mode 100644 index 000000000..17f8bf5a5 --- /dev/null +++ b/arch/openrisc/include/asm/delay.h @@ -0,0 +1,24 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_DELAY_H +#define __ASM_OPENRISC_DELAY_H + +#include <asm-generic/delay.h> + +extern unsigned long loops_per_jiffy; + +#endif diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h new file mode 100644 index 000000000..d334e204b --- /dev/null +++ b/arch/openrisc/include/asm/elf.h @@ -0,0 +1,65 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
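+ *
+ * Note that elf_check_arch() below accepts both EM_OPENRISC, the
+ * officially assigned machine number, and EM_OR32, a legacy value
+ * emitted by older or32 toolchains.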
+ */ +#ifndef __ASM_OPENRISC_ELF_H +#define __ASM_OPENRISC_ELF_H + + +#include <linux/types.h> +#include <uapi/asm/elf.h> + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ + +#define elf_check_arch(x) \ + (((x)->e_machine == EM_OR32) || ((x)->e_machine == EM_OPENRISC)) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (0x08000000) + +/* + * Enable dump using regset. + * This covers all of general/DSP/FPU regs. + */ +#define CORE_DUMP_USE_REGSET + +#define ELF_EXEC_PAGESIZE 8192 + +extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt); +#define ELF_CORE_COPY_REGS(dest, regs) dump_elf_thread(dest, regs); + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +#endif diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h new file mode 100644 index 000000000..5a0159546 --- /dev/null +++ b/arch/openrisc/include/asm/fixmap.h @@ -0,0 +1,88 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_FIXMAP_H +#define __ASM_OPENRISC_FIXMAP_H + +/* Why exactly do we need 2 empty pages between the top of the fixed + * addresses and the top of virtual memory? Something is using that + * memory space but not sure what right now... If you find it, leave + * a comment here. + */ +#define FIXADDR_TOP ((unsigned long) (-2*PAGE_SIZE)) + +#include <linux/kernel.h> +#include <linux/bug.h> +#include <asm/page.h> + +/* + * On OpenRISC we use these special fixed_addresses for doing ioremap + * early in the boot process before memory initialization is complete. + * This is used, in particular, by the early serial console code. + * + * It's not really 'fixmap', per se, but fits loosely into the same + * paradigm. + */ +enum fixed_addresses { + /* + * FIX_IOREMAP entries are useful for mapping physical address + * space before ioremap() is useable, e.g. really early in boot + * before kmalloc() is working. + */ +#define FIX_N_IOREMAPS 32 + FIX_IOREMAP_BEGIN, + FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +/* FIXADDR_BOTTOM might be a better name here... 
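+ * For the record, with the 8 KiB pages used here (PAGE_SHIFT == 13)
+ * and 32 ioremap slots, FIXADDR_TOP is 0xffffc000, FIXADDR_SIZE is
+ * 32 << 13 == 0x40000, and FIXADDR_START works out to 0xfff7c000.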
*/ +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ + if (idx >= __end_of_fixed_addresses) + BUG(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif diff --git a/arch/openrisc/include/asm/futex.h b/arch/openrisc/include/asm/futex.h new file mode 100644 index 000000000..618da4a1b --- /dev/null +++ b/arch/openrisc/include/asm/futex.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_OPENRISC_FUTEX_H +#define __ASM_OPENRISC_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +({ \ + __asm__ __volatile__ ( \ + "1: l.lwa %0, %2 \n" \ + insn "\n" \ + "2: l.swa %2, %1 \n" \ + " l.bnf 1b \n" \ + " l.ori %1, r0, 0 \n" \ + "3: \n" \ + ".section .fixup,\"ax\" \n" \ + "4: l.j 3b \n" \ + " l.addi %1, r0, %3 \n" \ + ".previous \n" \ + ".section __ex_table,\"a\" \n" \ + ".word 1b,4b,2b,4b \n" \ + ".previous \n" \ + : "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \ + : "i" (-EFAULT), "r" (oparg) \ + : "cc", "memory" \ + ); \ +}) + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("l.or %1,%4,%4", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("l.add %1,%0,%4", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("l.or %1,%0,%4", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("l.and %1,%0,%4", ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("l.xor %1,%0,%4", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 prev; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( \ + "1: l.lwa %1, %2 \n" \ + " l.sfeq %1, %3 \n" \ + " l.bnf 3f \n" \ + " l.nop \n" \ + "2: l.swa %2, %4 \n" \ + " l.bnf 1b \n" \ + " l.nop \n" \ + "3: \n" \ + ".section .fixup,\"ax\" \n" \ + "4: l.j 3b \n" \ + " l.addi %0, r0, %5 \n" \ + ".previous \n" \ + ".section __ex_table,\"a\" \n" \ + ".word 1b,4b,2b,4b \n" \ + ".previous \n" \ + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) \ + : "r" (oldval), "r" (newval), "i" (-EFAULT) \ + : "cc", "memory" \ + ); + + *uval = prev; + return ret; +} + +#endif /* __KERNEL__ */ + +#endif /* __ASM_OPENRISC_FUTEX_H */ diff --git 
a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h new file mode 100644 index 000000000..6709b28a0 --- /dev/null +++ b/arch/openrisc/include/asm/io.h @@ -0,0 +1,53 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_IO_H +#define __ASM_OPENRISC_IO_H + +/* + * PCI: can we really do 0 here if we have no port IO? + */ +#define IO_SPACE_LIMIT 0 + +/* OpenRISC has no port IO */ +#define HAVE_ARCH_PIO_SIZE 1 +#define PIO_RESERVED 0X0UL +#define PIO_OFFSET 0 +#define PIO_MASK 0 + +#define ioremap_nocache ioremap_nocache +#include <asm-generic/io.h> +#include <asm/pgtable.h> + +extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size, + pgprot_t prot); + +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return __ioremap(offset, size, PAGE_KERNEL); +} + +/* #define _PAGE_CI 0x002 */ +static inline void __iomem *ioremap_nocache(phys_addr_t offset, + unsigned long size) +{ + return __ioremap(offset, size, + __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI)); +} + +extern void iounmap(void *addr); +#endif diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h new file mode 100644 index 000000000..eb612b186 --- /dev/null +++ b/arch/openrisc/include/asm/irq.h @@ -0,0 +1,27 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_IRQ_H__ +#define __ASM_OPENRISC_IRQ_H__ + +#define NR_IRQS 32 +#include <asm-generic/irq.h> + +#define NO_IRQ (-1) + +#endif /* __ASM_OPENRISC_IRQ_H__ */ diff --git a/arch/openrisc/include/asm/irqflags.h b/arch/openrisc/include/asm/irqflags.h new file mode 100644 index 000000000..dc86c653d --- /dev/null +++ b/arch/openrisc/include/asm/irqflags.h @@ -0,0 +1,29 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
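+ *
+ * ARCH_IRQ_ENABLED below unmasks both interrupt sources in the
+ * supervision register: SPR_SR_IEE (external interrupt exceptions)
+ * and SPR_SR_TEE (tick timer exceptions), so "IRQs on" always means
+ * both are enabled.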
+ */ + +#ifndef ___ASM_OPENRISC_IRQFLAGS_H +#define ___ASM_OPENRISC_IRQFLAGS_H + +#include <asm/spr_defs.h> + +#define ARCH_IRQ_DISABLED 0x00 +#define ARCH_IRQ_ENABLED (SPR_SR_IEE|SPR_SR_TEE) + +#include <asm-generic/irqflags.h> + +#endif /* ___ASM_OPENRISC_IRQFLAGS_H */ diff --git a/arch/openrisc/include/asm/linkage.h b/arch/openrisc/include/asm/linkage.h new file mode 100644 index 000000000..e26387520 --- /dev/null +++ b/arch/openrisc/include/asm/linkage.h @@ -0,0 +1,25 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_LINKAGE_H +#define __ASM_OPENRISC_LINKAGE_H + +#define __ALIGN .align 0 +#define __ALIGN_STR ".align 0" + +#endif /* __ASM_OPENRISC_LINKAGE_H */ diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h new file mode 100644 index 000000000..d069bc2dd --- /dev/null +++ b/arch/openrisc/include/asm/mmu.h @@ -0,0 +1,26 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_MMU_H +#define __ASM_OPENRISC_MMU_H + +#ifndef __ASSEMBLY__ +typedef unsigned long mm_context_t; +#endif + +#endif diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h new file mode 100644 index 000000000..c380d8caf --- /dev/null +++ b/arch/openrisc/include/asm/mmu_context.h @@ -0,0 +1,43 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_MMU_CONTEXT_H +#define __ASM_OPENRISC_MMU_CONTEXT_H + +#include <asm-generic/mm_hooks.h> + +extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +extern void destroy_context(struct mm_struct *mm); +extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk); + +#define deactivate_mm(tsk, mm) do { } while (0) + +#define activate_mm(prev, next) switch_mm((prev), (next), NULL) + +/* current active pgd - this is similar to other processors pgd + * registers like cr3 on the i386 + */ + +extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */ + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +#endif diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h new file mode 100644 index 000000000..35bcb7cd2 --- /dev/null +++ b/arch/openrisc/include/asm/page.h @@ -0,0 +1,101 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_PAGE_H +#define __ASM_OPENRISC_PAGE_H + + +/* PAGE_SHIFT determines the page size */ + +#define PAGE_SHIFT 13 +#ifdef __ASSEMBLY__ +#define PAGE_SIZE (1 << PAGE_SHIFT) +#else +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#endif +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define PAGE_OFFSET 0xc0000000 +#define KERNELBASE PAGE_OFFSET + +/* This is not necessarily the right place for this, but it's needed by + * drivers/of/fdt.c + */ +#include <asm/setup.h> + +#ifndef __ASSEMBLY__ + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +/* + * These are used to make use of C type-checking.. 
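+ *
+ * Wrapping each entry type in its own struct means that, for example,
+ * passing a pte_t where a pgd_t is expected is a compile error rather
+ * than a silent mix-up; the __pte()/pte_val() pairs below do the
+ * boxing and unboxing.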
+ */ +typedef struct { + unsigned long pte; +} pte_t; +typedef struct { + unsigned long pgd; +} pgd_t; +typedef struct { + unsigned long pgprot; +} pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +#endif /* !__ASSEMBLY__ */ + + +#ifndef __ASSEMBLY__ + +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) +#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_to_page(addr) \ + (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) + +#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +#define pfn_valid(pfn) ((pfn) < max_mapnr) + +#define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr))) + +#endif /* __ASSEMBLY__ */ + + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* __ASM_OPENRISC_PAGE_H */ diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h new file mode 100644 index 000000000..8999b9226 --- /dev/null +++ b/arch/openrisc/include/asm/pgalloc.h @@ -0,0 +1,111 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_PGALLOC_H +#define __ASM_OPENRISC_PGALLOC_H + +#include <asm/page.h> +#include <linux/threads.h> +#include <linux/mm.h> +#include <linux/memblock.h> + +extern int mem_init_done; + +#define pmd_populate_kernel(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte))) + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + struct page *pte) +{ + set_pmd(pmd, __pmd(_KERNPG_TABLE + + ((unsigned long)page_to_pfn(pte) << + (unsigned long) PAGE_SHIFT))); +} + +/* + * Allocate and free page tables. + */ +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL); + + if (ret) { + memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + memcpy(ret + USER_PTRS_PER_PGD, + swapper_pg_dir + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + + } + return ret; +} + +#if 0 +/* FIXME: This seems to be the preferred style, but we are using + * current_pgd (from mm->pgd) to load kernel pages so we need it + * initialized. This needs to be looked into. 
+ */ +extern inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return (pgd_t *)get_zeroed_page(GFP_KERNEL); +} +#endif + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} + +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + pte = alloc_pages(GFP_KERNEL, 0); + if (!pte) + return NULL; + clear_page(page_address(pte)); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, struct page *pte) +{ + pgtable_page_dtor(pte); + __free_page(pte); +} + +#define __pte_free_tlb(tlb, pte, addr) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), (pte)); \ +} while (0) + +#define pmd_pgtable(pmd) pmd_page(pmd) + +#define check_pgt_cache() do { } while (0) + +#endif diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h new file mode 100644 index 000000000..21c713030 --- /dev/null +++ b/arch/openrisc/include/asm/pgtable.h @@ -0,0 +1,458 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* or32 pgtable.h - macros and functions to manipulate page tables + * + * Based on: + * include/asm-cris/pgtable.h + */ + +#ifndef __ASM_OPENRISC_PGTABLE_H +#define __ASM_OPENRISC_PGTABLE_H + +#define __ARCH_USE_5LEVEL_HACK +#include <asm-generic/pgtable-nopmd.h> + +#ifndef __ASSEMBLY__ +#include <asm/mmu.h> +#include <asm/fixmap.h> + +/* + * The Linux memory management assumes a three-level page table setup. On + * or32, we use that, but "fold" the mid level into the top-level page + * table. Since the MMU TLB is software loaded through an interrupt, it + * supports any page table structure, so we could have used a three-level + * setup, but for the amounts of memory we normally use, a two-level is + * probably more efficient. + * + * This file contains the functions and defines necessary to modify and use + * the or32 page table tree. + */ + +extern void paging_init(void); + +/* Certain architectures need to do special things when pte's + * within a page table are directly modified. Thus, the following + * hook is made available. + */ +#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +/* + * (pmds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) + +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * entries per page directory level: we use a two-level, so + * we don't really have any PMD directory physically. 
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
+
+#define PTRS_PER_PGD (1UL << (32-PGDIR_SHIFT))
+
+/* calculate how many PGD entries a user-level program can use
+ * the first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space)
+ */
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+/*
+ * Kernel's own virtual memory area.
+ */
+
+/*
+ * The size and location of the vmalloc area are chosen so that modules
+ * placed in this area aren't more than a 28-bit signed offset from any
+ * kernel functions that they may need. This greatly simplifies handling
+ * of the relocations for l.j and l.jal instructions as we don't need to
+ * introduce any trampolines for reaching "distant" code.
+ *
+ * 64 MB of vmalloc area is comparable to what's available on other arches.
+ */
+
+#define VMALLOC_START (PAGE_OFFSET-0x04000000UL)
+#define VMALLOC_END (PAGE_OFFSET)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+/* Define some higher level generic page attributes.
+ *
+ * If you change the _PAGE_CI definition, be sure to change it in
+ * io.h for ioremap_nocache() too.
+ */
+
+/*
+ * An OR32 PTE looks like this:
+ *
+ * | 31 ... 10 |  9  | 8 ... 6 |  5  |  4  |  3  |  2  |  1  |  0  |
+ *  Phys pg.num   L    PP Index   D     A    WOM   WBC   CI    CC
+ *
+ * L  : link
+ * PPI: Page protection index
+ * D  : Dirty
+ * A  : Accessed
+ * WOM: Weakly ordered memory
+ * WBC: Write-back cache
+ * CI : Cache inhibit
+ * CC : Cache coherent
+ *
+ * The protection bits below should correspond to the layout of the actual
+ * PTE as per above.
+ */
+
+#define _PAGE_CC       0x001 /* software: pte contains a translation */
+#define _PAGE_CI       0x002 /* cache inhibit */
+#define _PAGE_WBC      0x004 /* write back cache */
+#define _PAGE_WOM      0x008 /* weakly ordered memory */
+
+#define _PAGE_A        0x010 /* accessed */
+#define _PAGE_D        0x020 /* dirty */
+#define _PAGE_URE      0x040 /* user read enable */
+#define _PAGE_UWE      0x080 /* user write enable */
+
+#define _PAGE_SRE      0x100 /* superuser read enable */
+#define _PAGE_SWE      0x200 /* superuser write enable */
+#define _PAGE_EXEC     0x400 /* software: page is executable */
+#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
+
+/* 0x001 is the cache coherency bit, which should always be set to
+ *       1 - for SMP (when we support it)
+ *       0 - otherwise
+ *
+ * we just reuse this bit in software for _PAGE_PRESENT and
+ * force it to 0 when loading it into the TLB.
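+ *
+ * Editorial aside -- a worked example, not in the original file: with
+ * PAGE_SHIFT 13 and PGDIR_SHIFT 24, a 32-bit virtual address splits into
+ * an 8-bit pgd index, an 11-bit pte index and a 13-bit page offset.
+ * For va == 0xc0012345:
+ *
+ *	pgd index = va >> 24           = 0xc0 (192)
+ *	pte index = (va >> 13) & 0x7ff = 0x009 (9)
+ *	offset    = va & 0x1fff        = 0x345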
+ */ +#define _PAGE_PRESENT _PAGE_CC +#define _PAGE_USER _PAGE_URE +#define _PAGE_WRITE (_PAGE_UWE | _PAGE_SWE) +#define _PAGE_DIRTY _PAGE_D +#define _PAGE_ACCESSED _PAGE_A +#define _PAGE_NO_CACHE _PAGE_CI +#define _PAGE_SHARED _PAGE_U_SHARED +#define _PAGE_READ (_PAGE_URE | _PAGE_SRE) + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _PAGE_ALL (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _KERNPG_TABLE \ + (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define PAGE_NONE __pgprot(_PAGE_ALL) +#define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) +#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) +#define PAGE_SHARED \ + __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \ + | _PAGE_SHARED) +#define PAGE_SHARED_X \ + __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \ + | _PAGE_SHARED | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) +#define PAGE_COPY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) + +#define PAGE_KERNEL \ + __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ + | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC) +#define PAGE_KERNEL_RO \ + __pgprot(_PAGE_ALL | _PAGE_SRE \ + | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC) +#define PAGE_KERNEL_NOCACHE \ + __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ + | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY_X +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY_X +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY_X +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED_X +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED_X + +/* zero page used for uninitialized stuff */ +extern unsigned long empty_zero_page[2048]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +/* number of bits that fit into a memory pointer */ +#define BITS_PER_PTR (8*sizeof(unsigned long)) + +/* to align the pointer to a pointer address */ +#define PTR_MASK (~(sizeof(void *)-1)) + +/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ +/* 64-bit machines, beware! SRB. */ +#define SIZEOF_PTR_LOG2 2 + +/* to find an entry in a page-table */ +#define PAGE_PTR(address) \ +((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) + +/* to set the page-dir */ +#define SET_PAGE_DIR(tsk, pgdir) + +#define pte_none(x) (!pte_val(x)) +#define pte_present(x) (pte_val(x) & _PAGE_PRESENT) +#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0) + +#define pmd_none(x) (!pmd_val(x)) +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE) +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) +#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
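+ *
+ * Editorial aside -- a usage sketch, not in the original file: callers
+ * check presence before consulting any other bit, e.g.
+ *
+ *	if (pte_present(pte) && pte_write(pte))
+ *		pte = pte_mkdirty(pte_mkyoung(pte));
+ *
+ * because for a non-present pte the same bits may encode a swp_entry_t
+ * instead (see the swap macros near the end of this file).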
+ */ + +static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; } +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) { return 0; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE); + return pte; +} + +static inline pte_t pte_rdprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_READ); + return pte; +} + +static inline pte_t pte_exprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_EXEC); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_DIRTY); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + return pte; +} + +static inline pte_t pte_mkread(pte_t pte) +{ + pte_val(pte) |= _PAGE_READ; + return pte; +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + pte_val(pte) |= _PAGE_EXEC; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + return pte; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ + +/* What actually goes as arguments to the various functions is less than + * obvious, but a rule of thumb is that struct page's goes as struct page *, + * really physical DRAM addresses are unsigned long's, and DRAM "virtual" + * addresses (the 0xc0xxxxxx's) goes as void *'s. + */ + +static inline pte_t __mk_pte(void *page, pgprot_t pgprot) +{ + pte_t pte; + /* the PTE needs a physical address */ + pte_val(pte) = __pa(page) | pgprot_val(pgprot); + return pte; +} + +#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) + +#define mk_pte_phys(physpage, pgprot) \ +({ \ + pte_t __pte; \ + \ + pte_val(__pte) = (physpage) + pgprot_val(pgprot); \ + __pte; \ +}) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + + +/* + * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval + * __pte_page(pte_val) refers to the "virtual" DRAM interval + * pte_pagenr refers to the page-number counted starting from the virtual + * DRAM start + */ + +static inline unsigned long __pte_page(pte_t pte) +{ + /* the PTE contains a physical address */ + return (unsigned long)__va(pte_val(pte) & PAGE_MASK); +} + +#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) + +/* permanent address of a page */ + +#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) +#define pte_page(pte) (mem_map+pte_pagenr(pte)) + +/* + * only the pte's themselves need to point to physical DRAM (see above) + * the pagetable links are purely handled within the kernel SW and thus + * don't need the __pa and __va transformations. 
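+ *
+ * Editorial aside -- an illustrative sketch, not in the original file:
+ * __mk_pte() turns a kernel virtual address into the physical form the
+ * TLB reload handler expects, and __pte_page() undoes it:
+ *
+ *	void *va = page_address(page);
+ *	pte_t pte = __mk_pte(va, PAGE_KERNEL);
+ *
+ * after which pte_val(pte) holds __pa(va) | pgprot_val(PAGE_KERNEL), and
+ * __pte_page(pte) == (unsigned long)va holds again.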
+ */ +static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) +{ + pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep; +} + +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) +#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + +/* to find an entry in a page-table-directory. */ +#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +#define __pgd_offset(address) pgd_index(address) + +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +#define __pmd_offset(address) \ + (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +/* + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] + * + * this macro returns the index of the entry in the pte page which would + * control the given virtual address + */ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address)) +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_offset_map_nested(dir, address) \ + pte_offset_map(dir, address) + +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) +#define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT) +#define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \ + __FILE__, __LINE__, &(e), pte_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \ + __FILE__, __LINE__, &(e), pgd_val(e)) + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ + +struct vm_area_struct; + +static inline void update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t *pte) +{ +} + +extern void update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *pte) +{ + update_tlb(vma, address, pte); + update_cache(vma, address, pte); +} + +/* __PHX__ FIXME, SWAP, this probably doesn't work */ + +/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ +/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ + +#define __swp_type(x) (((x).val >> 5) & 0x7f) +#define __swp_offset(x) ((x).val >> 12) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define kern_addr_valid(addr) (1) + +#include <asm-generic/pgtable.h> + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +typedef pte_t *pte_addr_t; + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_OPENRISC_PGTABLE_H */ diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h new file mode 100644 index 000000000..af31a9fe7 --- /dev/null +++ b/arch/openrisc/include/asm/processor.h @@ -0,0 +1,90 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PROCESSOR_H
+#define __ASM_OPENRISC_PROCESSOR_H
+
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#define STACK_TOP     TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+/* Kernel and user SR register setting */
+#define KERNEL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_SM)
+#define USER_SR   (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ * User space process size. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+
+#define TASK_SIZE (0x80000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+struct thread_struct {
+};
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on the top of the
+ * kernel-stack. This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this (i.e. not regs
+ * for a running signal handler)
+ */
+#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE - STACK_FRAME_OVERHEAD)) - 1)
+
+/*
+ * Ditto, but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD { }
+
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax() barrier()
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
new file mode 100644
index 000000000..6ca17264c
--- /dev/null
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -0,0 +1,124 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */ +#ifndef __ASM_OPENRISC_PTRACE_H +#define __ASM_OPENRISC_PTRACE_H + + +#include <asm/spr_defs.h> +#include <uapi/asm/ptrace.h> + +/* + * Make kernel PTrace/register structures opaque to userspace... userspace can + * access thread state via the regset mechanism. This allows us a bit of + * flexibility in how we order the registers on the stack, permitting some + * optimizations like packing call-clobbered registers together so that + * they share a cacheline (not done yet, though... future optimization). + */ + +#ifndef __ASSEMBLY__ +/* + * This struct describes how the registers are laid out on the kernel stack + * during a syscall or other kernel entry. + * + * This structure should always be cacheline aligned on the stack. + * FIXME: I don't think that's the case right now. The alignment is + * taken care of elsewhere... head.S, process.c, etc. + */ + +struct pt_regs { + union { + struct { + /* Named registers */ + long sr; /* Stored in place of r0 */ + long sp; /* r1 */ + }; + struct { + /* Old style */ + long offset[2]; + long gprs[30]; + }; + struct { + /* New style */ + long gpr[32]; + }; + }; + long pc; + /* For restarting system calls: + * Set to syscall number for syscall exceptions, + * -1 for all other exceptions. + */ + long orig_gpr11; /* For restarting system calls */ + long dummy; /* Cheap alignment fix */ + long dummy2; /* Cheap alignment fix */ +}; + +/* TODO: Rename this to REDZONE because that's what it is */ +#define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */ + +#define instruction_pointer(regs) ((regs)->pc) +#define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0) +#define user_stack_pointer(regs) ((unsigned long)(regs)->sp) +#define profile_pc(regs) instruction_pointer(regs) + +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->gpr[11]; +} + +#endif /* __ASSEMBLY__ */ + +/* + * Offsets used by 'ptrace' system call interface. + */ +#define PT_SR 0 +#define PT_SP 4 +#define PT_GPR2 8 +#define PT_GPR3 12 +#define PT_GPR4 16 +#define PT_GPR5 20 +#define PT_GPR6 24 +#define PT_GPR7 28 +#define PT_GPR8 32 +#define PT_GPR9 36 +#define PT_GPR10 40 +#define PT_GPR11 44 +#define PT_GPR12 48 +#define PT_GPR13 52 +#define PT_GPR14 56 +#define PT_GPR15 60 +#define PT_GPR16 64 +#define PT_GPR17 68 +#define PT_GPR18 72 +#define PT_GPR19 76 +#define PT_GPR20 80 +#define PT_GPR21 84 +#define PT_GPR22 88 +#define PT_GPR23 92 +#define PT_GPR24 96 +#define PT_GPR25 100 +#define PT_GPR26 104 +#define PT_GPR27 108 +#define PT_GPR28 112 +#define PT_GPR29 116 +#define PT_GPR30 120 +#define PT_GPR31 124 +#define PT_PC 128 +#define PT_ORIG_GPR11 132 +#define PT_SYSCALLNO 136 + +#endif /* __ASM_OPENRISC_PTRACE_H */ diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h new file mode 100644 index 000000000..cb5932f54 --- /dev/null +++ b/arch/openrisc/include/asm/serial.h @@ -0,0 +1,36 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_SERIAL_H +#define __ASM_OPENRISC_SERIAL_H + +#ifdef __KERNEL__ + +#include <asm/cpuinfo.h> + +/* There's a generic version of this file, but it assumes a 1.8MHz UART clk... + * this, on the other hand, assumes the UART clock is tied to the system + * clock... 8250_early.c (early 8250 serial console) actually uses this, so + * it needs to be correct to get the early console working. + */ + +#define BASE_BAUD (cpuinfo_or1k[smp_processor_id()].clock_frequency/16) + +#endif /* __KERNEL__ */ + +#endif /* __ASM_OPENRISC_SERIAL_H */ diff --git a/arch/openrisc/include/asm/smp.h b/arch/openrisc/include/asm/smp.h new file mode 100644 index 000000000..e21d2f12b --- /dev/null +++ b/arch/openrisc/include/asm/smp.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __ASM_OPENRISC_SMP_H +#define __ASM_OPENRISC_SMP_H + +#include <asm/spr.h> +#include <asm/spr_defs.h> + +#define raw_smp_processor_id() (current_thread_info()->cpu) +#define hard_smp_processor_id() mfspr(SPR_COREID) + +extern void smp_init_cpus(void); + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); +extern void handle_IPI(unsigned int ipi_msg); + +#endif /* __ASM_OPENRISC_SMP_H */ diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h new file mode 100644 index 000000000..9b761e0e2 --- /dev/null +++ b/arch/openrisc/include/asm/spinlock.h @@ -0,0 +1,34 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_SPINLOCK_H +#define __ASM_OPENRISC_SPINLOCK_H + +#include <asm/qspinlock.h> + +#include <asm/qrwlock.h> + +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) + +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() + + +#endif diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h new file mode 100644 index 000000000..7c6fb1208 --- /dev/null +++ b/arch/openrisc/include/asm/spinlock_types.h @@ -0,0 +1,7 @@ +#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H +#define _ASM_OPENRISC_SPINLOCK_TYPES_H + +#include <asm/qspinlock_types.h> +#include <asm/qrwlock_types.h> + +#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */ diff --git a/arch/openrisc/include/asm/spr.h b/arch/openrisc/include/asm/spr.h new file mode 100644 index 000000000..1cccb42dd --- /dev/null +++ b/arch/openrisc/include/asm/spr.h @@ -0,0 +1,42 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_SPR_H +#define __ASM_OPENRISC_SPR_H + +#define mtspr(_spr, _val) __asm__ __volatile__ ( \ + "l.mtspr r0,%1,%0" \ + : : "K" (_spr), "r" (_val)) +#define mtspr_off(_spr, _off, _val) __asm__ __volatile__ ( \ + "l.mtspr %0,%1,%2" \ + : : "r" (_off), "r" (_val), "K" (_spr)) + +static inline unsigned long mfspr(unsigned long add) +{ + unsigned long ret; + __asm__ __volatile__ ("l.mfspr %0,r0,%1" : "=r" (ret) : "K" (add)); + return ret; +} + +static inline unsigned long mfspr_off(unsigned long add, unsigned long offset) +{ + unsigned long ret; + __asm__ __volatile__ ("l.mfspr %0,%1,%2" : "=r" (ret) + : "r" (offset), "K" (add)); + return ret; +} + +#endif diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h new file mode 100644 index 000000000..154b5a1ee --- /dev/null +++ b/arch/openrisc/include/asm/spr_defs.h @@ -0,0 +1,618 @@ +/* + * OpenRISC Linux + * + * SPR Definitions + * + * Copyright (C) 2000 Damjan Lampret + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2008, 2010 Embecosm Limited + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This file is part of OpenRISC 1000 Architectural Simulator. + */ + +#ifndef SPR_DEFS__H +#define SPR_DEFS__H + +/* Definition of special-purpose registers (SPRs). 
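+ *
+ * Editorial aside -- a usage sketch, not in the original file: these
+ * constants are consumed by the mtspr()/mfspr() accessors from
+ * <asm/spr.h> above, e.g.
+ *
+ *	unsigned long core = mfspr(SPR_COREID);
+ *	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << hwirq));
+ *
+ * where hwirq is a hypothetical interrupt line number.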
*/ + +#define MAX_GRPS (32) +#define MAX_SPRS_PER_GRP_BITS (11) +#define MAX_SPRS_PER_GRP (1 << MAX_SPRS_PER_GRP_BITS) +#define MAX_SPRS (0x10000) + +/* Base addresses for the groups */ +#define SPRGROUP_SYS (0 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_DMMU (1 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_IMMU (2 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_DC (3 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_IC (4 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_MAC (5 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_D (6 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_PC (7 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_PM (8 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_PIC (9 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_TT (10 << MAX_SPRS_PER_GRP_BITS) +#define SPRGROUP_FP (11 << MAX_SPRS_PER_GRP_BITS) + +/* System control and status group */ +#define SPR_VR (SPRGROUP_SYS + 0) +#define SPR_UPR (SPRGROUP_SYS + 1) +#define SPR_CPUCFGR (SPRGROUP_SYS + 2) +#define SPR_DMMUCFGR (SPRGROUP_SYS + 3) +#define SPR_IMMUCFGR (SPRGROUP_SYS + 4) +#define SPR_DCCFGR (SPRGROUP_SYS + 5) +#define SPR_ICCFGR (SPRGROUP_SYS + 6) +#define SPR_DCFGR (SPRGROUP_SYS + 7) +#define SPR_PCCFGR (SPRGROUP_SYS + 8) +#define SPR_VR2 (SPRGROUP_SYS + 9) +#define SPR_AVR (SPRGROUP_SYS + 10) +#define SPR_EVBAR (SPRGROUP_SYS + 11) +#define SPR_AECR (SPRGROUP_SYS + 12) +#define SPR_AESR (SPRGROUP_SYS + 13) +#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */ +#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */ +#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */ +#define SPR_FPCSR (SPRGROUP_SYS + 20) /* CZ 21/06/01 */ +#define SPR_EPCR_BASE (SPRGROUP_SYS + 32) /* CZ 21/06/01 */ +#define SPR_EPCR_LAST (SPRGROUP_SYS + 47) /* CZ 21/06/01 */ +#define SPR_EEAR_BASE (SPRGROUP_SYS + 48) +#define SPR_EEAR_LAST (SPRGROUP_SYS + 63) +#define SPR_ESR_BASE (SPRGROUP_SYS + 64) +#define SPR_ESR_LAST (SPRGROUP_SYS + 79) +#define SPR_COREID (SPRGROUP_SYS + 128) +#define SPR_NUMCORES (SPRGROUP_SYS + 129) +#define SPR_GPR_BASE (SPRGROUP_SYS + 1024) + +/* Data MMU group */ +#define SPR_DMMUCR (SPRGROUP_DMMU + 0) +#define SPR_DTLBEIR (SPRGROUP_DMMU + 2) +#define SPR_DTLBMR_BASE(WAY) (SPRGROUP_DMMU + 0x200 + (WAY) * 0x100) +#define SPR_DTLBMR_LAST(WAY) (SPRGROUP_DMMU + 0x27f + (WAY) * 0x100) +#define SPR_DTLBTR_BASE(WAY) (SPRGROUP_DMMU + 0x280 + (WAY) * 0x100) +#define SPR_DTLBTR_LAST(WAY) (SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100) + +/* Instruction MMU group */ +#define SPR_IMMUCR (SPRGROUP_IMMU + 0) +#define SPR_ITLBEIR (SPRGROUP_IMMU + 2) +#define SPR_ITLBMR_BASE(WAY) (SPRGROUP_IMMU + 0x200 + (WAY) * 0x100) +#define SPR_ITLBMR_LAST(WAY) (SPRGROUP_IMMU + 0x27f + (WAY) * 0x100) +#define SPR_ITLBTR_BASE(WAY) (SPRGROUP_IMMU + 0x280 + (WAY) * 0x100) +#define SPR_ITLBTR_LAST(WAY) (SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100) + +/* Data cache group */ +#define SPR_DCCR (SPRGROUP_DC + 0) +#define SPR_DCBPR (SPRGROUP_DC + 1) +#define SPR_DCBFR (SPRGROUP_DC + 2) +#define SPR_DCBIR (SPRGROUP_DC + 3) +#define SPR_DCBWR (SPRGROUP_DC + 4) +#define SPR_DCBLR (SPRGROUP_DC + 5) +#define SPR_DCR_BASE(WAY) (SPRGROUP_DC + 0x200 + (WAY) * 0x200) +#define SPR_DCR_LAST(WAY) (SPRGROUP_DC + 0x3ff + (WAY) * 0x200) + +/* Instruction cache group */ +#define SPR_ICCR (SPRGROUP_IC + 0) +#define SPR_ICBPR (SPRGROUP_IC + 1) +#define SPR_ICBIR (SPRGROUP_IC + 2) +#define SPR_ICBLR (SPRGROUP_IC + 3) +#define SPR_ICR_BASE(WAY) (SPRGROUP_IC + 0x200 + (WAY) * 0x200) +#define SPR_ICR_LAST(WAY) (SPRGROUP_IC + 0x3ff + (WAY) * 0x200) + +/* MAC group */ +#define SPR_MACLO (SPRGROUP_MAC + 1) +#define SPR_MACHI 
(SPRGROUP_MAC + 2) + +/* Debug group */ +#define SPR_DVR(N) (SPRGROUP_D + (N)) +#define SPR_DCR(N) (SPRGROUP_D + 8 + (N)) +#define SPR_DMR1 (SPRGROUP_D + 16) +#define SPR_DMR2 (SPRGROUP_D + 17) +#define SPR_DWCR0 (SPRGROUP_D + 18) +#define SPR_DWCR1 (SPRGROUP_D + 19) +#define SPR_DSR (SPRGROUP_D + 20) +#define SPR_DRR (SPRGROUP_D + 21) + +/* Performance counters group */ +#define SPR_PCCR(N) (SPRGROUP_PC + (N)) +#define SPR_PCMR(N) (SPRGROUP_PC + 8 + (N)) + +/* Power management group */ +#define SPR_PMR (SPRGROUP_PM + 0) + +/* PIC group */ +#define SPR_PICMR (SPRGROUP_PIC + 0) +#define SPR_PICPR (SPRGROUP_PIC + 1) +#define SPR_PICSR (SPRGROUP_PIC + 2) + +/* Tick Timer group */ +#define SPR_TTMR (SPRGROUP_TT + 0) +#define SPR_TTCR (SPRGROUP_TT + 1) + +/* + * Bit definitions for the Version Register + * + */ +#define SPR_VR_VER 0xff000000 /* Processor version */ +#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */ +#define SPR_VR_RES 0x0000ffc0 /* Reserved */ +#define SPR_VR_REV 0x0000003f /* Processor revision */ +#define SPR_VR_UVRP 0x00000040 /* Updated Version Registers Present */ + +#define SPR_VR_VER_OFF 24 +#define SPR_VR_CFG_OFF 16 +#define SPR_VR_REV_OFF 0 + +/* + * Bit definitions for the Version Register 2 + */ +#define SPR_VR2_CPUID 0xff000000 /* Processor ID */ +#define SPR_VR2_VER 0x00ffffff /* Processor version */ + +/* + * Bit definitions for the Unit Present Register + * + */ +#define SPR_UPR_UP 0x00000001 /* UPR present */ +#define SPR_UPR_DCP 0x00000002 /* Data cache present */ +#define SPR_UPR_ICP 0x00000004 /* Instruction cache present */ +#define SPR_UPR_DMP 0x00000008 /* Data MMU present */ +#define SPR_UPR_IMP 0x00000010 /* Instruction MMU present */ +#define SPR_UPR_MP 0x00000020 /* MAC present */ +#define SPR_UPR_DUP 0x00000040 /* Debug unit present */ +#define SPR_UPR_PCUP 0x00000080 /* Performance counters unit present */ +#define SPR_UPR_PICP 0x00000100 /* PIC present */ +#define SPR_UPR_PMP 0x00000200 /* Power management present */ +#define SPR_UPR_TTP 0x00000400 /* Tick timer present */ +#define SPR_UPR_RES 0x00fe0000 /* Reserved */ +#define SPR_UPR_CUP 0xff000000 /* Context units present */ + +/* + * JPB: Bit definitions for the CPU configuration register + * + */ +#define SPR_CPUCFGR_NSGF 0x0000000f /* Number of shadow GPR files */ +#define SPR_CPUCFGR_CGF 0x00000010 /* Custom GPR file */ +#define SPR_CPUCFGR_OB32S 0x00000020 /* ORBIS32 supported */ +#define SPR_CPUCFGR_OB64S 0x00000040 /* ORBIS64 supported */ +#define SPR_CPUCFGR_OF32S 0x00000080 /* ORFPX32 supported */ +#define SPR_CPUCFGR_OF64S 0x00000100 /* ORFPX64 supported */ +#define SPR_CPUCFGR_OV64S 0x00000200 /* ORVDX64 supported */ +#define SPR_CPUCFGR_RES 0xfffffc00 /* Reserved */ + +/* + * JPB: Bit definitions for the Debug configuration register and other + * constants. 
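+ *
+ * Editorial aside -- a decode sketch, not in the original file: the
+ * masked fields above are extracted with their *_OFF shifts, e.g. for
+ * the version register:
+ *
+ *	unsigned long vr  = mfspr(SPR_VR);
+ *	unsigned int  ver = (vr & SPR_VR_VER) >> SPR_VR_VER_OFF;
+ *	unsigned int  rev = (vr & SPR_VR_REV) >> SPR_VR_REV_OFF;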
+ * + */ + +#define SPR_DCFGR_NDP 0x00000007 /* Number of matchpoints mask */ +#define SPR_DCFGR_NDP1 0x00000000 /* One matchpoint supported */ +#define SPR_DCFGR_NDP2 0x00000001 /* Two matchpoints supported */ +#define SPR_DCFGR_NDP3 0x00000002 /* Three matchpoints supported */ +#define SPR_DCFGR_NDP4 0x00000003 /* Four matchpoints supported */ +#define SPR_DCFGR_NDP5 0x00000004 /* Five matchpoints supported */ +#define SPR_DCFGR_NDP6 0x00000005 /* Six matchpoints supported */ +#define SPR_DCFGR_NDP7 0x00000006 /* Seven matchpoints supported */ +#define SPR_DCFGR_NDP8 0x00000007 /* Eight matchpoints supported */ +#define SPR_DCFGR_WPCI 0x00000008 /* Watchpoint counters implemented */ + +#define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \ + 2 == n ? SPR_DCFGR_NDP2 : \ + 3 == n ? SPR_DCFGR_NDP3 : \ + 4 == n ? SPR_DCFGR_NDP4 : \ + 5 == n ? SPR_DCFGR_NDP5 : \ + 6 == n ? SPR_DCFGR_NDP6 : \ + 7 == n ? SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8) +#define MAX_MATCHPOINTS 8 +#define MAX_WATCHPOINTS (MAX_MATCHPOINTS + 2) + +/* + * Bit definitions for the Supervision Register + * + */ +#define SPR_SR_SM 0x00000001 /* Supervisor Mode */ +#define SPR_SR_TEE 0x00000002 /* Tick timer Exception Enable */ +#define SPR_SR_IEE 0x00000004 /* Interrupt Exception Enable */ +#define SPR_SR_DCE 0x00000008 /* Data Cache Enable */ +#define SPR_SR_ICE 0x00000010 /* Instruction Cache Enable */ +#define SPR_SR_DME 0x00000020 /* Data MMU Enable */ +#define SPR_SR_IME 0x00000040 /* Instruction MMU Enable */ +#define SPR_SR_LEE 0x00000080 /* Little Endian Enable */ +#define SPR_SR_CE 0x00000100 /* CID Enable */ +#define SPR_SR_F 0x00000200 /* Condition Flag */ +#define SPR_SR_CY 0x00000400 /* Carry flag */ +#define SPR_SR_OV 0x00000800 /* Overflow flag */ +#define SPR_SR_OVE 0x00001000 /* Overflow flag Exception */ +#define SPR_SR_DSX 0x00002000 /* Delay Slot Exception */ +#define SPR_SR_EPH 0x00004000 /* Exception Prefix High */ +#define SPR_SR_FO 0x00008000 /* Fixed one */ +#define SPR_SR_SUMRA 0x00010000 /* Supervisor SPR read access */ +#define SPR_SR_RES 0x0ffe0000 /* Reserved */ +#define SPR_SR_CID 0xf0000000 /* Context ID */ + +/* + * Bit definitions for the Data MMU Control Register + * + */ +#define SPR_DMMUCR_P2S 0x0000003e /* Level 2 Page Size */ +#define SPR_DMMUCR_P1S 0x000007c0 /* Level 1 Page Size */ +#define SPR_DMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */ +#define SPR_DMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */ + +/* + * Bit definitions for the Instruction MMU Control Register + * + */ +#define SPR_IMMUCR_P2S 0x0000003e /* Level 2 Page Size */ +#define SPR_IMMUCR_P1S 0x000007c0 /* Level 1 Page Size */ +#define SPR_IMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */ +#define SPR_IMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */ + +/* + * Bit definitions for the Data TLB Match Register + * + */ +#define SPR_DTLBMR_V 0x00000001 /* Valid */ +#define SPR_DTLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */ +#define SPR_DTLBMR_CID 0x0000003c /* Context ID */ +#define SPR_DTLBMR_LRU 0x000000c0 /* Least Recently Used */ +#define SPR_DTLBMR_VPN 0xfffff000 /* Virtual Page Number */ + +/* + * Bit definitions for the Data TLB Translate Register + * + */ +#define SPR_DTLBTR_CC 0x00000001 /* Cache Coherency */ +#define SPR_DTLBTR_CI 0x00000002 /* Cache Inhibit */ +#define SPR_DTLBTR_WBC 0x00000004 /* Write-Back Cache */ +#define SPR_DTLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */ +#define SPR_DTLBTR_A 0x00000010 /* Accessed */ +#define SPR_DTLBTR_D 0x00000020 /* Dirty */ +#define 
SPR_DTLBTR_URE 0x00000040 /* User Read Enable */
+#define SPR_DTLBTR_UWE 0x00000080 /* User Write Enable */
+#define SPR_DTLBTR_SRE 0x00000100 /* Supervisor Read Enable */
+#define SPR_DTLBTR_SWE 0x00000200 /* Supervisor Write Enable */
+#define SPR_DTLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Match Register
+ *
+ */
+#define SPR_ITLBMR_V   0x00000001 /* Valid */
+#define SPR_ITLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */
+#define SPR_ITLBMR_CID 0x0000003c /* Context ID */
+#define SPR_ITLBMR_LRU 0x000000c0 /* Least Recently Used */
+#define SPR_ITLBMR_VPN 0xfffff000 /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Translate Register
+ *
+ */
+#define SPR_ITLBTR_CC  0x00000001 /* Cache Coherency */
+#define SPR_ITLBTR_CI  0x00000002 /* Cache Inhibit */
+#define SPR_ITLBTR_WBC 0x00000004 /* Write-Back Cache */
+#define SPR_ITLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */
+#define SPR_ITLBTR_A   0x00000010 /* Accessed */
+#define SPR_ITLBTR_D   0x00000020 /* Dirty */
+#define SPR_ITLBTR_SXE 0x00000040 /* Supervisor Execute Enable */
+#define SPR_ITLBTR_UXE 0x00000080 /* User Execute Enable */
+#define SPR_ITLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for Data Cache Control register
+ *
+ */
+#define SPR_DCCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Insn Cache Control register
+ *
+ */
+#define SPR_ICCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Data Cache Configuration Register
+ *
+ */
+
+#define SPR_DCCFGR_NCW    0x00000007
+#define SPR_DCCFGR_NCS    0x00000078
+#define SPR_DCCFGR_CBS    0x00000080
+#define SPR_DCCFGR_CWS    0x00000100
+#define SPR_DCCFGR_CCRI   0x00000200
+#define SPR_DCCFGR_CBIRI  0x00000400
+#define SPR_DCCFGR_CBPRI  0x00000800
+#define SPR_DCCFGR_CBLRI  0x00001000
+#define SPR_DCCFGR_CBFRI  0x00002000
+#define SPR_DCCFGR_CBWBRI 0x00004000
+
+#define SPR_DCCFGR_NCW_OFF 0
+#define SPR_DCCFGR_NCS_OFF 3
+#define SPR_DCCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Instruction Cache Configuration Register
+ *
+ */
+#define SPR_ICCFGR_NCW   0x00000007
+#define SPR_ICCFGR_NCS   0x00000078
+#define SPR_ICCFGR_CBS   0x00000080
+#define SPR_ICCFGR_CCRI  0x00000200
+#define SPR_ICCFGR_CBIRI 0x00000400
+#define SPR_ICCFGR_CBPRI 0x00000800
+#define SPR_ICCFGR_CBLRI 0x00001000
+
+#define SPR_ICCFGR_NCW_OFF 0
+#define SPR_ICCFGR_NCS_OFF 3
+#define SPR_ICCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Data MMU Configuration Register
+ *
+ */
+
+#define SPR_DMMUCFGR_NTW   0x00000003
+#define SPR_DMMUCFGR_NTS   0x0000001C
+#define SPR_DMMUCFGR_NAE   0x000000E0
+#define SPR_DMMUCFGR_CRI   0x00000100
+#define SPR_DMMUCFGR_PRI   0x00000200
+#define SPR_DMMUCFGR_TEIRI 0x00000400
+#define SPR_DMMUCFGR_HTR   0x00000800
+
+#define SPR_DMMUCFGR_NTW_OFF 0
+#define SPR_DMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Instruction MMU Configuration Register
+ *
+ */
+
+#define SPR_IMMUCFGR_NTW   0x00000003
+#define SPR_IMMUCFGR_NTS   0x0000001C
+#define SPR_IMMUCFGR_NAE   0x000000E0
+#define SPR_IMMUCFGR_CRI   0x00000100
+#define SPR_IMMUCFGR_PRI   0x00000200
+#define SPR_IMMUCFGR_TEIRI 0x00000400
+#define SPR_IMMUCFGR_HTR   0x00000800
+
+#define SPR_IMMUCFGR_NTW_OFF 0
+#define SPR_IMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Debug Control registers
+ *
+ */
+#define SPR_DCR_DP 0x00000001 /* DVR/DCR present */
+#define SPR_DCR_CC 0x0000000e /* Compare condition */
+#define SPR_DCR_SC 0x00000010 /* Signed compare */
+#define SPR_DCR_CT 0x000000e0 /* Compare to */
+
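+/*
+ * Editorial aside -- an illustrative sketch, not part of the original
+ * header: the DMMUCFGR fields above encode the data-TLB geometry, and a
+ * kernel can recover it with the masks and *_OFF shifts (helper names
+ * hypothetical; mfspr() comes from <asm/spr.h>):
+ */
+#if 0
+static inline unsigned int dmmu_num_ways(void)
+{
+	/* NTW encodes the number of TLB ways minus one */
+	return ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)
+		>> SPR_DMMUCFGR_NTW_OFF) + 1;
+}
+
+static inline unsigned int dmmu_num_sets(void)
+{
+	/* NTS encodes log2 of the number of TLB sets */
+	return 1U << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS)
+		      >> SPR_DMMUCFGR_NTS_OFF);
+}
+#endif
+
+/* Bit results with SPR_DCR_CC mask */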
+#define SPR_DCR_CC_MASKED 0x00000000
+#define SPR_DCR_CC_EQUAL  0x00000002
+#define SPR_DCR_CC_LESS   0x00000004
+#define SPR_DCR_CC_LESSE  0x00000006
+#define SPR_DCR_CC_GREAT  0x00000008
+#define SPR_DCR_CC_GREATE 0x0000000a
+#define SPR_DCR_CC_NEQUAL 0x0000000c
+
+/* Bit results with SPR_DCR_CT mask */
+#define SPR_DCR_CT_DISABLED 0x00000000
+#define SPR_DCR_CT_IFEA     0x00000020
+#define SPR_DCR_CT_LEA      0x00000040
+#define SPR_DCR_CT_SEA      0x00000060
+#define SPR_DCR_CT_LD       0x00000080
+#define SPR_DCR_CT_SD       0x000000a0
+#define SPR_DCR_CT_LSEA     0x000000c0
+#define SPR_DCR_CT_LSD      0x000000e0
+/* SPR_DCR_CT_LSD doesn't seem to be implemented anywhere in or1ksim. 2004-1-30 HP */
+
+/*
+ * Bit definitions for Debug Mode 1 register
+ *
+ */
+#define SPR_DMR1_CW      0x000fffff /* Chain register pair data */
+#define SPR_DMR1_CW0_AND 0x00000001
+#define SPR_DMR1_CW0_OR  0x00000002
+#define SPR_DMR1_CW0     (SPR_DMR1_CW0_AND | SPR_DMR1_CW0_OR)
+#define SPR_DMR1_CW1_AND 0x00000004
+#define SPR_DMR1_CW1_OR  0x00000008
+#define SPR_DMR1_CW1     (SPR_DMR1_CW1_AND | SPR_DMR1_CW1_OR)
+#define SPR_DMR1_CW2_AND 0x00000010
+#define SPR_DMR1_CW2_OR  0x00000020
+#define SPR_DMR1_CW2     (SPR_DMR1_CW2_AND | SPR_DMR1_CW2_OR)
+#define SPR_DMR1_CW3_AND 0x00000040
+#define SPR_DMR1_CW3_OR  0x00000080
+#define SPR_DMR1_CW3     (SPR_DMR1_CW3_AND | SPR_DMR1_CW3_OR)
+#define SPR_DMR1_CW4_AND 0x00000100
+#define SPR_DMR1_CW4_OR  0x00000200
+#define SPR_DMR1_CW4     (SPR_DMR1_CW4_AND | SPR_DMR1_CW4_OR)
+#define SPR_DMR1_CW5_AND 0x00000400
+#define SPR_DMR1_CW5_OR  0x00000800
+#define SPR_DMR1_CW5     (SPR_DMR1_CW5_AND | SPR_DMR1_CW5_OR)
+#define SPR_DMR1_CW6_AND 0x00001000
+#define SPR_DMR1_CW6_OR  0x00002000
+#define SPR_DMR1_CW6     (SPR_DMR1_CW6_AND | SPR_DMR1_CW6_OR)
+#define SPR_DMR1_CW7_AND 0x00004000
+#define SPR_DMR1_CW7_OR  0x00008000
+#define SPR_DMR1_CW7     (SPR_DMR1_CW7_AND | SPR_DMR1_CW7_OR)
+#define SPR_DMR1_CW8_AND 0x00010000
+#define SPR_DMR1_CW8_OR  0x00020000
+#define SPR_DMR1_CW8     (SPR_DMR1_CW8_AND | SPR_DMR1_CW8_OR)
+#define SPR_DMR1_CW9_AND 0x00040000
+#define SPR_DMR1_CW9_OR  0x00080000
+#define SPR_DMR1_CW9     (SPR_DMR1_CW9_AND | SPR_DMR1_CW9_OR)
+#define SPR_DMR1_RES1    0x00300000 /* Reserved */
+#define SPR_DMR1_ST      0x00400000 /* Single-step trace */
+#define SPR_DMR1_BT      0x00800000 /* Branch trace */
+#define SPR_DMR1_RES2    0xff000000 /* Reserved */
+
+/*
+ * Bit definitions for Debug Mode 2 register. AWTC and WGB corrected by JPB
+ *
+ */
+#define SPR_DMR2_WCE0     0x00000001 /* Watchpoint counter 0 enable */
+#define SPR_DMR2_WCE1     0x00000002 /* Watchpoint counter 1 enable */
+#define SPR_DMR2_AWTC     0x00000ffc /* Assign watchpoints to counters */
+#define SPR_DMR2_AWTC_OFF 2          /* Bit offset to AWTC field */
+#define SPR_DMR2_WGB      0x003ff000 /* Watchpoints generating breakpoint */
+#define SPR_DMR2_WGB_OFF  12         /* Bit offset to WGB field */
+#define SPR_DMR2_WBS      0xffc00000 /* JPB: Watchpoint status */
+#define SPR_DMR2_WBS_OFF  22         /* Bit offset to WBS field */
+
+/*
+ * Bit definitions for Debug watchpoint counter registers
+ *
+ */
+#define SPR_DWCR_COUNT     0x0000ffff /* Count */
+#define SPR_DWCR_MATCH     0xffff0000 /* Match */
+#define SPR_DWCR_MATCH_OFF 16         /* Match bit offset */
+
+/*
+ * Bit definitions for Debug stop register
+ *
+ */
+#define SPR_DSR_RSTE  0x00000001 /* Reset exception */
+#define SPR_DSR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DSR_DPFE  0x00000004 /* Data Page Fault exception */
+#define SPR_DSR_IPFE  0x00000008 /* Insn Page Fault exception */
+#define SPR_DSR_TTE   0x00000010 /* Tick Timer exception */
+#define SPR_DSR_AE    0x00000020 /* Alignment exception */
+#define SPR_DSR_IIE   0x00000040 /* Illegal Instruction exception */
+#define SPR_DSR_IE    0x00000080 /* Interrupt exception */
+#define SPR_DSR_DME   0x00000100 /* DTLB miss exception */
+#define SPR_DSR_IME   0x00000200 /* ITLB miss exception */
+#define SPR_DSR_RE    0x00000400 /* Range exception */
+#define SPR_DSR_SCE   0x00000800 /* System call exception */
+#define SPR_DSR_FPE   0x00001000 /* Floating Point Exception */
+#define SPR_DSR_TE    0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Debug reason register
+ *
+ */
+#define SPR_DRR_RSTE  0x00000001 /* Reset exception */
+#define SPR_DRR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DRR_DPFE  0x00000004 /* Data Page Fault exception */
+#define SPR_DRR_IPFE  0x00000008 /* Insn Page Fault exception */
+#define SPR_DRR_TTE   0x00000010 /* Tick Timer exception */
+#define SPR_DRR_AE    0x00000020 /* Alignment exception */
+#define SPR_DRR_IIE   0x00000040 /* Illegal Instruction exception */
+#define SPR_DRR_IE    0x00000080 /* Interrupt exception */
+#define SPR_DRR_DME   0x00000100 /* DTLB miss exception */
+#define SPR_DRR_IME   0x00000200 /* ITLB miss exception */
+#define SPR_DRR_RE    0x00000400 /* Range exception */
+#define SPR_DRR_SCE   0x00000800 /* System call exception */
+#define SPR_DRR_FPE   0x00001000 /* Floating Point Exception */
+#define SPR_DRR_TE    0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Performance counters mode registers
+ *
+ */
+#define SPR_PCMR_CP    0x00000001 /* Counter present */
+#define SPR_PCMR_UMRA  0x00000002 /* User mode read access */
+#define SPR_PCMR_CISM  0x00000004 /* Count in supervisor mode */
+#define SPR_PCMR_CIUM  0x00000008 /* Count in user mode */
+#define SPR_PCMR_LA    0x00000010 /* Load access event */
+#define SPR_PCMR_SA    0x00000020 /* Store access event */
+#define SPR_PCMR_IF    0x00000040 /* Instruction fetch event */
+#define SPR_PCMR_DCM   0x00000080 /* Data cache miss event */
+#define SPR_PCMR_ICM   0x00000100 /* Insn cache miss event */
+#define SPR_PCMR_IFS   0x00000200 /* Insn fetch stall event */
+#define SPR_PCMR_LSUS  0x00000400 /* LSU stall event */
+#define SPR_PCMR_BS    0x00000800 /* Branch stall event */
+#define SPR_PCMR_DTLBM 0x00001000 /* DTLB miss event */
+#define SPR_PCMR_ITLBM 0x00002000 /* ITLB miss event */
+#define SPR_PCMR_DDS   0x00004000 /* Data dependency stall event */
+#define SPR_PCMR_WPE
0x03ff8000 /* Watchpoint events */ + +/* + * Bit definitions for the Power management register + * + */ +#define SPR_PMR_SDF 0x0000000f /* Slow down factor */ +#define SPR_PMR_DME 0x00000010 /* Doze mode enable */ +#define SPR_PMR_SME 0x00000020 /* Sleep mode enable */ +#define SPR_PMR_DCGE 0x00000040 /* Dynamic clock gating enable */ +#define SPR_PMR_SUME 0x00000080 /* Suspend mode enable */ + +/* + * Bit definitions for PICMR + * + */ +#define SPR_PICMR_IUM 0xfffffffc /* Interrupt unmask */ + +/* + * Bit definitions for PICPR + * + */ +#define SPR_PICPR_IPRIO 0xfffffffc /* Interrupt priority */ + +/* + * Bit definitions for PICSR + * + */ +#define SPR_PICSR_IS 0xffffffff /* Interrupt status */ + +/* + * Bit definitions for Tick Timer Control Register + * + */ + +#define SPR_TTCR_CNT 0xffffffff /* Count, time period */ +#define SPR_TTMR_TP 0x0fffffff /* Time period */ +#define SPR_TTMR_IP 0x10000000 /* Interrupt Pending */ +#define SPR_TTMR_IE 0x20000000 /* Interrupt Enable */ +#define SPR_TTMR_DI 0x00000000 /* Disabled */ +#define SPR_TTMR_RT 0x40000000 /* Restart tick */ +#define SPR_TTMR_SR 0x80000000 /* Single run */ +#define SPR_TTMR_CR 0xc0000000 /* Continuous run */ +#define SPR_TTMR_M 0xc0000000 /* Tick mode */ + +/* + * Bit definitions for the FP Control Status Register + * + */ +#define SPR_FPCSR_FPEE 0x00000001 /* Floating Point Exception Enable */ +#define SPR_FPCSR_RM 0x00000006 /* Rounding Mode */ +#define SPR_FPCSR_OVF 0x00000008 /* Overflow Flag */ +#define SPR_FPCSR_UNF 0x00000010 /* Underflow Flag */ +#define SPR_FPCSR_SNF 0x00000020 /* SNAN Flag */ +#define SPR_FPCSR_QNF 0x00000040 /* QNAN Flag */ +#define SPR_FPCSR_ZF 0x00000080 /* Zero Flag */ +#define SPR_FPCSR_IXF 0x00000100 /* Inexact Flag */ +#define SPR_FPCSR_IVF 0x00000200 /* Invalid Flag */ +#define SPR_FPCSR_INF 0x00000400 /* Infinity Flag */ +#define SPR_FPCSR_DZF 0x00000800 /* Divide By Zero Flag */ +#define SPR_FPCSR_ALLF (SPR_FPCSR_OVF | SPR_FPCSR_UNF | SPR_FPCSR_SNF | \ + SPR_FPCSR_QNF | SPR_FPCSR_ZF | SPR_FPCSR_IXF | \ + SPR_FPCSR_IVF | SPR_FPCSR_INF | SPR_FPCSR_DZF) + +#define FPCSR_RM_RN (0<<1) +#define FPCSR_RM_RZ (1<<1) +#define FPCSR_RM_RIP (2<<1) +#define FPCSR_RM_RIN (3<<1) + +/* + * l.nop constants + * + */ +#define NOP_NOP 0x0000 /* Normal nop instruction */ +#define NOP_EXIT 0x0001 /* End of simulation */ +#define NOP_REPORT 0x0002 /* Simple report */ +/*#define NOP_PRINTF 0x0003 Simprintf instruction (obsolete)*/ +#define NOP_PUTC 0x0004 /* JPB: Simputc instruction */ +#define NOP_CNT_RESET 0x0005 /* Reset statistics counters */ +#define NOP_GET_TICKS 0x0006 /* JPB: Get # ticks running */ +#define NOP_GET_PS 0x0007 /* JPB: Get picosecs/cycle */ +#define NOP_REPORT_FIRST 0x0400 /* Report with number */ +#define NOP_REPORT_LAST 0x03ff /* Report with number */ + +#endif /* SPR_DEFS__H */ diff --git a/arch/openrisc/include/asm/string.h b/arch/openrisc/include/asm/string.h new file mode 100644 index 000000000..69b975454 --- /dev/null +++ b/arch/openrisc/include/asm/string.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_OPENRISC_STRING_H +#define __ASM_OPENRISC_STRING_H + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *s, int c, __kernel_size_t n); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *dest, __const void *src, __kernel_size_t n); + +#endif /* __ASM_OPENRISC_STRING_H */ diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h new file mode 100644 index 000000000..2db9f1cf0 --- /dev/null +++ 
b/arch/openrisc/include/asm/syscall.h @@ -0,0 +1,79 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_SYSCALL_H__ +#define __ASM_OPENRISC_SYSCALL_H__ + +#include <uapi/linux/audit.h> +#include <linux/err.h> +#include <linux/sched.h> + +static inline int +syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs->orig_gpr11; +} + +static inline void +syscall_rollback(struct task_struct *task, struct pt_regs *regs) +{ + regs->gpr[11] = regs->orig_gpr11; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return IS_ERR_VALUE(regs->gpr[11]) ? regs->gpr[11] : 0; +} + +static inline long +syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) +{ + return regs->gpr[11]; +} + +static inline void +syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val) +{ + regs->gpr[11] = (long) error ?: val; +} + +static inline void +syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, unsigned long *args) +{ + BUG_ON(i + n > 6); + + memcpy(args, ®s->gpr[3 + i], n * sizeof(args[0])); +} + +static inline void +syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, const unsigned long *args) +{ + BUG_ON(i + n > 6); + + memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); +} + +static inline int syscall_get_arch(void) +{ + return AUDIT_ARCH_OPENRISC; +} +#endif diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h new file mode 100644 index 000000000..8ee816812 --- /dev/null +++ b/arch/openrisc/include/asm/syscalls.h @@ -0,0 +1,34 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
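+ *
+ * Editorial aside -- a usage sketch, not in the original file: per
+ * syscall.h above, syscall arguments live in gpr3..gpr8 and the result
+ * in gpr11, so a tracer reading the first two arguments of the syscall
+ * that 'task' is stopped in would do:
+ *
+ *	unsigned long args[2];
+ *	syscall_get_arguments(task, task_pt_regs(task), 0, 2, args);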
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALLS_H
+#define __ASM_OPENRISC_SYSCALLS_H
+
+asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
+				unsigned long *v2);
+
+#include <asm-generic/syscalls.h>
+
+asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
+			void __user *parent_tid, void __user *child_tid, int tls);
+asmlinkage long __sys_fork(void);
+
+#define sys_clone __sys_clone
+#define sys_fork __sys_fork
+
+#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
new file mode 100644
index 000000000..5c15dfa2f
--- /dev/null
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -0,0 +1,125 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#endif
+
+
+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
+ * Normally, the stack is found by doing something like p + THREAD_SIZE;
+ * in or32, a page is 8192 bytes, which seems like a sane size.
+ */
+
+#define THREAD_SIZE_ORDER 0
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+struct thread_info {
+	struct task_struct *task;          /* main task structure */
+	unsigned long      flags;          /* low level flags */
+	__u32              cpu;            /* current CPU */
+	__s32              preempt_count;  /* 0 => preemptable, <0 => BUG */
+
+	mm_segment_t       addr_limit;     /* thread address space:
+	                                      0-0x7FFFFFFF for user-thread
+	                                      0-0xFFFFFFFF for kernel-thread */
+	__u8               supervisor_stack[0];
+
+	/* saved context data */
+	unsigned long      ksp;
+};
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
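+ *
+ * Editorial aside -- an illustrative sketch, not in the original file:
+ * thread_info sits at the base of the 8 KiB kernel stack, so the generic
+ * trick of masking the stack pointer would also find it:
+ *
+ *	struct thread_info *ti = (void *)(sp & ~(THREAD_SIZE - 1));
+ *
+ * but this port instead dedicates r10 to it (see current_thread_info()
+ * below), trading a general-purpose register for a cheaper lookup.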
diff --git a/arch/openrisc/include/asm/time.h b/arch/openrisc/include/asm/time.h
new file mode 100644
index 000000000..313ee9757
--- /dev/null
+++ b/arch/openrisc/include/asm/time.h
@@ -0,0 +1,23 @@
+/*
+ * OpenRISC timer API
+ *
+ * Copyright (C) 2017 by Stafford Horne (shorne@gmail.com)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_OR1K_TIME_H
+#define __ASM_OR1K_TIME_H
+
+extern void openrisc_clockevent_init(void);
+
+extern void openrisc_timer_set(unsigned long count);
+extern void openrisc_timer_set_next(unsigned long delta);
+
+#ifdef CONFIG_SMP
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
+#endif
+
+#endif /* __ASM_OR1K_TIME_H */
diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
new file mode 100644
index 000000000..34d015bf0
--- /dev/null
+++ b/arch/openrisc/include/asm/timex.h
@@ -0,0 +1,37 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */ + +#ifndef __ASM_OPENRISC_TIMEX_H +#define __ASM_OPENRISC_TIMEX_H + +#define get_cycles get_cycles + +#include <asm-generic/timex.h> +#include <asm/spr.h> +#include <asm/spr_defs.h> + +static inline cycles_t get_cycles(void) +{ + return mfspr(SPR_TTCR); +} +#define get_cycles get_cycles + +/* This isn't really used any more */ +#define CLOCK_TICK_RATE 1000 + +#define ARCH_HAS_READ_CURRENT_TIMER + +#endif diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h new file mode 100644 index 000000000..fa4376a45 --- /dev/null +++ b/arch/openrisc/include/asm/tlb.h @@ -0,0 +1,34 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_TLB_H__ +#define __ASM_OPENRISC_TLB_H__ + +/* + * or32 doesn't need any special per-pte or + * per-vma handling.. + */ +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +#include <linux/pagemap.h> +#include <asm-generic/tlb.h> + +#endif /* __ASM_OPENRISC_TLB_H__ */ diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h new file mode 100644 index 000000000..94227f0ea --- /dev/null +++ b/arch/openrisc/include/asm/tlbflush.h @@ -0,0 +1,68 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_TLBFLUSH_H +#define __ASM_OPENRISC_TLBFLUSH_H + +#include <linux/mm.h> +#include <asm/processor.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/current.h> +#include <linux/sched.h> + +/* + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr); +extern void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end); + +#ifndef CONFIG_SMP +#define flush_tlb_all local_flush_tlb_all +#define flush_tlb_mm local_flush_tlb_mm +#define flush_tlb_page local_flush_tlb_page +#define flush_tlb_range local_flush_tlb_range +#else +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +#endif + +static inline void flush_tlb(void) +{ + flush_tlb_mm(current->mm); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_range(NULL, start, end); +} + +#endif /* __ASM_OPENRISC_TLBFLUSH_H */ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h new file mode 100644 index 000000000..8b204cd1f --- /dev/null +++ b/arch/openrisc/include/asm/uaccess.h @@ -0,0 +1,273 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_UACCESS_H +#define __ASM_OPENRISC_UACCESS_H + +/* + * User space memory access functions + */ +#include <linux/prefetch.h> +#include <linux/string.h> +#include <asm/page.h> +#include <asm/extable.h> + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ + +/* addr_limit is the maximum accessible address for the task. we misuse + * the KERNEL_DS and USER_DS values to both assign and compare the + * addr_limit values through the equally misnamed get/set_fs macros. 
+ * (see above)
+ */
+
+#define KERNEL_DS	(~0UL)
+#define get_ds()	(KERNEL_DS)
+
+#define USER_DS		(TASK_SIZE)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a) == (b))
+
+/* Ensure that the range from addr to addr+size is all within the process'
+ * address space
+ */
+#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
+
+/* Ensure that addr is below task's addr_limit */
+#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+
+#define access_ok(type, addr, size)						\
+({										\
+	unsigned long __ao_addr = (unsigned long)(addr);			\
+	unsigned long __ao_size = (unsigned long)(size);			\
+	__range_ok(__ao_addr, __ao_size);					\
+})
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * OpenRISC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size)		\
+({							\
+	long __pu_err;					\
+	__put_user_size((x), (ptr), (size), __pu_err);	\
+	__pu_err;					\
+})
+
+#define __put_user_check(x, ptr, size)					\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
+		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;							\
+})
+
+#define __put_user_size(x, ptr, size, retval)				\
+do {									\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __put_user_asm(x, ptr, retval, "l.sb"); break;		\
+	case 2: __put_user_asm(x, ptr, retval, "l.sh"); break;		\
+	case 4: __put_user_asm(x, ptr, retval, "l.sw"); break;		\
+	case 8: __put_user_asm2(x, ptr, retval); break;			\
+	default: __put_user_bad();					\
+	}								\
+} while (0)
+
+struct __large_struct {
+	unsigned long buf[100];
+};
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
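+ *
+ * For callers the net effect is simple; a typical path (identifier
+ * names here are only illustrative) looks like
+ *
+ *	int kval;
+ *	if (get_user(kval, (int __user *)ubuf))
+ *		return -EFAULT;
+ *
+ * with a faulting access redirected through the fixup sections
+ * emitted below and surfacing as the -EFAULT return value.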
+ */ +#define __put_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" 0(%2),%1\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: l.addi %0,r0,%3\n" \ + " l.j 2b\n" \ + " l.nop\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".previous" \ + : "=r"(err) \ + : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err)) + +#define __put_user_asm2(x, addr, err) \ + __asm__ __volatile__( \ + "1: l.sw 0(%2),%1\n" \ + "2: l.sw 4(%2),%H1\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: l.addi %0,r0,%3\n" \ + " l.j 3b\n" \ + " l.nop\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,4b\n" \ + " .long 2b,4b\n" \ + ".previous" \ + : "=r"(err) \ + : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err)) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err, __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT, __gu_val = 0; \ + const __typeof__(*(ptr)) * __gu_addr = (ptr); \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +extern long __get_user_bad(void); + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ + case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ + case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ + case 8: __get_user_asm2(x, ptr, retval); break; \ + default: (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1,0(%2)\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: l.addi %0,r0,%3\n" \ + " l.addi %1,r0,0\n" \ + " l.j 2b\n" \ + " l.nop\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".previous" \ + : "=r"(err), "=r"(x) \ + : "r"(addr), "i"(-EFAULT), "0"(err)) + +#define __get_user_asm2(x, addr, err) \ + __asm__ __volatile__( \ + "1: l.lwz %1,0(%2)\n" \ + "2: l.lwz %H1,4(%2)\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: l.addi %0,r0,%3\n" \ + " l.addi %1,r0,0\n" \ + " l.addi %H1,r0,0\n" \ + " l.j 3b\n" \ + " l.nop\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,4b\n" \ + " .long 2b,4b\n" \ + ".previous" \ + : "=r"(err), "=&r"(x) \ + : "r"(addr), "i"(-EFAULT), "0"(err)) + +/* more complex routines */ + +extern unsigned long __must_check +__copy_tofrom_user(void *to, const void *from, unsigned long size); +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long size) +{ + return __copy_tofrom_user(to, (__force const void *)from, size); +} +static inline unsigned long +raw_copy_to_user(void *to, const void __user *from, unsigned long size) +{ + return __copy_tofrom_user((__force void *)to, from, size); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern unsigned long __clear_user(void *addr, unsigned long size); + +static inline __must_check unsigned long +clear_user(void *addr, unsigned long size) +{ + if (likely(access_ok(VERIFY_WRITE, addr, size))) + size = __clear_user(addr, size); + return size; +} + +#define user_addr_max() \ + (uaccess_kernel() ? 
~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dest, const char __user *src, long count); + +extern __must_check long strnlen_user(const char __user *str, long n); + +#endif /* __ASM_OPENRISC_UACCESS_H */ diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h new file mode 100644 index 000000000..1141cbd6f --- /dev/null +++ b/arch/openrisc/include/asm/unaligned.h @@ -0,0 +1,51 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_UNALIGNED_H +#define __ASM_OPENRISC_UNALIGNED_H + +/* + * This is copied from the generic implementation and the C-struct + * variant replaced with the memmove variant. The GCC compiler + * for the OR32 arch optimizes too aggressively for the C-struct + * variant to work, so use the memmove variant instead. + * + * It may be worth considering implementing the unaligned access + * exception handler and allowing unaligned accesses (access_ok.h)... + * not sure if it would be much of a performance win without further + * investigation. + */ +#include <asm/byteorder.h> + +#if defined(__LITTLE_ENDIAN) +# include <linux/unaligned/le_memmove.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__BIG_ENDIAN) +# include <linux/unaligned/be_memmove.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error need to define endianess +#endif + +#endif /* __ASM_OPENRISC_UNALIGNED_H */ diff --git a/arch/openrisc/include/asm/unwinder.h b/arch/openrisc/include/asm/unwinder.h new file mode 100644 index 000000000..165ec6f02 --- /dev/null +++ b/arch/openrisc/include/asm/unwinder.h @@ -0,0 +1,20 @@ +/* + * OpenRISC unwinder.h + * + * Architecture API for unwinding stacks. + * + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */
+
+#ifndef __ASM_OPENRISC_UNWINDER_H
+#define __ASM_OPENRISC_UNWINDER_H
+
+void unwind_stack(void *data, unsigned long *stack,
+		  void (*trace)(void *data, unsigned long addr,
+				int reliable));
+
+#endif /* __ASM_OPENRISC_UNWINDER_H */
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..130c16ccb
--- /dev/null
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -0,0 +1,32 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/openrisc/include/uapi/asm/byteorder.h b/arch/openrisc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..60d14f7e1
--- /dev/null
+++ b/arch/openrisc/include/uapi/asm/byteorder.h
@@ -0,0 +1 @@
+#include <linux/byteorder/big_endian.h>
diff --git a/arch/openrisc/include/uapi/asm/elf.h b/arch/openrisc/include/uapi/asm/elf.h
new file mode 100644
index 000000000..e892d5061
--- /dev/null
+++ b/arch/openrisc/include/uapi/asm/elf.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI__ASM_OPENRISC_ELF_H
+#define _UAPI__ASM_OPENRISC_ELF_H
+
+/*
+ * This file is partially exported to userspace. This allows us to keep
+ * the ELF bits in one place which should assist in keeping the kernel and
+ * userspace in sync.
+ */
+
+/*
+ * ELF register definitions..
+ */
+
+/* for struct user_regs_struct definition */
+#include <asm/ptrace.h>
+
+/* The OR1K relocation types... not all relevant for module loader */
+#define R_OR32_NONE	0
+#define R_OR32_32	1
+#define R_OR32_16	2
+#define R_OR32_8	3
+#define R_OR32_CONST	4
+#define R_OR32_CONSTH	5
+#define R_OR32_JUMPTARG	6
+#define R_OR32_VTINHERIT 7
+#define R_OR32_VTENTRY	8
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
+ * thus exposed to user-space.
+ */
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* A placeholder; OR32 does not have fp support yet, so no fp regs for now.
*/ +typedef unsigned long elf_fpregset_t; + +/* EM_OPENRISC is defined in linux/elf-em.h */ +#define EM_OR32 0x8472 + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_ARCH EM_OR32 +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2MSB + +#endif /* _UAPI__ASM_OPENRISC_ELF_H */ diff --git a/arch/openrisc/include/uapi/asm/param.h b/arch/openrisc/include/uapi/asm/param.h new file mode 100644 index 000000000..103471e31 --- /dev/null +++ b/arch/openrisc/include/uapi/asm/param.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_OPENRISC_PARAM_H +#define __ASM_OPENRISC_PARAM_H + +#define EXEC_PAGESIZE 8192 + +#include <asm-generic/param.h> + +#endif /* __ASM_OPENRISC_PARAM_H */ diff --git a/arch/openrisc/include/uapi/asm/ptrace.h b/arch/openrisc/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..d4fab268f --- /dev/null +++ b/arch/openrisc/include/uapi/asm/ptrace.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _UAPI__ASM_OPENRISC_PTRACE_H +#define _UAPI__ASM_OPENRISC_PTRACE_H + +#ifndef __ASSEMBLY__ +/* + * This is the layout of the regset returned by the GETREGSET ptrace call + */ +struct user_regs_struct { + /* GPR R0-R31... */ + unsigned long gpr[32]; + unsigned long pc; + unsigned long sr; +}; +#endif + + +#endif /* _UAPI__ASM_OPENRISC_PTRACE_H */ diff --git a/arch/openrisc/include/uapi/asm/sigcontext.h b/arch/openrisc/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..8ab775fc3 --- /dev/null +++ b/arch/openrisc/include/uapi/asm/sigcontext.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_SIGCONTEXT_H +#define __ASM_OPENRISC_SIGCONTEXT_H + +#include <asm/ptrace.h> + +/* This struct is saved by setup_frame in signal.c, to keep the current + context while a signal handler is executed. It's restored by sys_sigreturn. +*/ + +struct sigcontext { + struct user_regs_struct regs; /* needs to be first */ + unsigned long oldmask; +}; + +#endif /* __ASM_OPENRISC_SIGCONTEXT_H */ diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h new file mode 100644 index 000000000..11c5a58ab --- /dev/null +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define sys_mmap2 sys_mmap_pgoff + +#define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_CLONE + +#include <asm-generic/unistd.h> + +#define __NR_or1k_atomic __NR_arch_specific_syscall +__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic) diff --git a/arch/openrisc/kernel/.gitignore b/arch/openrisc/kernel/.gitignore new file mode 100644 index 000000000..c5f676c3c --- /dev/null +++ b/arch/openrisc/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile new file mode 100644 index 000000000..2d172e79f --- /dev/null +++ b/arch/openrisc/kernel/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. +# + +extra-y := head.o vmlinux.lds + +obj-y := setup.o or32_ksyms.o process.o dma.o \ + traps.o time.o irq.o entry.o ptrace.o signal.o \ + sys_call_table.o unwinder.o + +obj-$(CONFIG_SMP) += smp.o sync-timer.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_OF) += prom.o + +clean: diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c new file mode 100644 index 000000000..ddb736855 --- /dev/null +++ b/arch/openrisc/kernel/asm-offsets.c @@ -0,0 +1,66 @@ +/* + * OpenRISC asm-offsets.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This program is used to generate definitions needed by + * assembly language modules. + * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. 
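+ *
+ * For example, the DEFINE(TI_KSP, ...) below comes out as a plain
+ * "#define TI_KSP <byte offset>" in the generated asm-offsets.h,
+ * which entry.S then uses directly in its load/store instructions.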
+ */ + +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/thread_info.h> +#include <linux/kbuild.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> + +int main(void) +{ + /* offsets into the task_struct */ + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + + /* offsets into thread_info */ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_KSP, offsetof(struct thread_info, ksp)); + + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + + /* Interrupt register frame */ + DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); + DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); + + DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28); + return 0; +} diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c new file mode 100644 index 000000000..159336adf --- /dev/null +++ b/arch/openrisc/kernel/dma.c @@ -0,0 +1,160 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * DMA mapping callbacks... + * As alloc_coherent is the only DMA callback being used currently, that's + * the only thing implemented properly. The rest need looking into... + */ + +#include <linux/dma-noncoherent.h> + +#include <asm/cpuinfo.h> +#include <asm/spr_defs.h> +#include <asm/tlbflush.h> + +static int +page_set_nocache(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + unsigned long cl; + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + + pte_val(*pte) |= _PAGE_CI; + + /* + * Flush the page out of the TLB so that the new page flags get + * picked up next time there's an access + */ + flush_tlb_page(NULL, addr); + + /* Flush page out of dcache */ + for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) + mtspr(SPR_DCBFR, cl); + + return 0; +} + +static int +page_clear_nocache(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pte_val(*pte) &= ~_PAGE_CI; + + /* + * Flush the page out of the TLB so that the new page flags get + * picked up next time there's an access + */ + flush_tlb_page(NULL, addr); + + return 0; +} + +/* + * Alloc "coherent" memory, which for OpenRISC means simply uncached. 
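+ * (Drivers normally reach this via the generic API, e.g.
+ *	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
+ * rather than by calling it directly.)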
+ * + * This function effectively just calls __get_free_pages, sets the + * cache-inhibit bit on those pages, and makes sure that the pages are + * flushed out of the cache before they are used. + * + * If the NON_CONSISTENT attribute is set, then this function just + * returns "normal", cachable memory. + * + * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take + * into consideration here, too. All current known implementations of + * the OR1K support only strongly ordered memory accesses, so that flag + * is being ignored for now; uncached but write-combined memory is a + * missing feature of the OR1K. + */ +void * +arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + unsigned long va; + void *page; + struct mm_walk walk = { + .pte_entry = page_set_nocache, + .mm = &init_mm + }; + + page = alloc_pages_exact(size, gfp); + if (!page) + return NULL; + + /* This gives us the real physical address of the first page. */ + *dma_handle = __pa(page); + + va = (unsigned long)page; + + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { + /* + * We need to iterate through the pages, clearing the dcache for + * them and setting the cache-inhibit bit. + */ + if (walk_page_range(va, va + size, &walk)) { + free_pages_exact(page, size); + return NULL; + } + } + + return (void *)va; +} + +void +arch_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + unsigned long va = (unsigned long)vaddr; + struct mm_walk walk = { + .pte_entry = page_clear_nocache, + .mm = &init_mm + }; + + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { + /* walk_page_range shouldn't be able to fail here */ + WARN_ON(walk_page_range(va, va + size, &walk)); + } + + free_pages_exact(vaddr, size); +} + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + unsigned long cl; + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + + switch (dir) { + case DMA_TO_DEVICE: + /* Flush the dcache for the requested range */ + for (cl = addr; cl < addr + size; + cl += cpuinfo->dcache_block_size) + mtspr(SPR_DCBFR, cl); + break; + case DMA_FROM_DEVICE: + /* Invalidate the dcache for the requested range */ + for (cl = addr; cl < addr + size; + cl += cpuinfo->dcache_block_size) + mtspr(SPR_DCBIR, cl); + break; + default: + /* + * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to + * flush nor invalidate the cache here as the area will need + * to be manually synced anyway. + */ + break; + } +} diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S new file mode 100644 index 000000000..c2c3ce8a0 --- /dev/null +++ b/arch/openrisc/kernel/entry.S @@ -0,0 +1,1220 @@ +/* + * OpenRISC entry.S + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/linkage.h> + +#include <asm/processor.h> +#include <asm/unistd.h> +#include <asm/thread_info.h> +#include <asm/errno.h> +#include <asm/spr_defs.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/asm-offsets.h> + +#define DISABLE_INTERRUPTS(t1,t2) \ + l.mfspr t2,r0,SPR_SR ;\ + l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ + l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ + l.and t2,t2,t1 ;\ + l.mtspr r0,t2,SPR_SR + +#define ENABLE_INTERRUPTS(t1) \ + l.mfspr t1,r0,SPR_SR ;\ + l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\ + l.mtspr r0,t1,SPR_SR + +/* =========================================================[ macros ]=== */ + +#ifdef CONFIG_TRACE_IRQFLAGS +/* + * Trace irq on/off creating a stack frame. + */ +#define TRACE_IRQS_OP(trace_op) \ + l.sw -8(r1),r2 /* store frame pointer */ ;\ + l.sw -4(r1),r9 /* store return address */ ;\ + l.addi r2,r1,0 /* move sp to fp */ ;\ + l.jal trace_op ;\ + l.addi r1,r1,-8 ;\ + l.ori r1,r2,0 /* restore sp */ ;\ + l.lwz r9,-4(r1) /* restore return address */ ;\ + l.lwz r2,-8(r1) /* restore fp */ ;\ +/* + * Trace irq on/off and save registers we need that would otherwise be + * clobbered. + */ +#define TRACE_IRQS_SAVE(t1,trace_op) \ + l.sw -12(r1),t1 /* save extra reg */ ;\ + l.sw -8(r1),r2 /* store frame pointer */ ;\ + l.sw -4(r1),r9 /* store return address */ ;\ + l.addi r2,r1,0 /* move sp to fp */ ;\ + l.jal trace_op ;\ + l.addi r1,r1,-12 ;\ + l.ori r1,r2,0 /* restore sp */ ;\ + l.lwz r9,-4(r1) /* restore return address */ ;\ + l.lwz r2,-8(r1) /* restore fp */ ;\ + l.lwz t1,-12(r1) /* restore extra reg */ + +#define TRACE_IRQS_OFF TRACE_IRQS_OP(trace_hardirqs_off) +#define TRACE_IRQS_ON TRACE_IRQS_OP(trace_hardirqs_on) +#define TRACE_IRQS_ON_SYSCALL \ + TRACE_IRQS_SAVE(r10,trace_hardirqs_on) ;\ + l.lwz r3,PT_GPR3(r1) ;\ + l.lwz r4,PT_GPR4(r1) ;\ + l.lwz r5,PT_GPR5(r1) ;\ + l.lwz r6,PT_GPR6(r1) ;\ + l.lwz r7,PT_GPR7(r1) ;\ + l.lwz r8,PT_GPR8(r1) ;\ + l.lwz r11,PT_GPR11(r1) +#define TRACE_IRQS_OFF_ENTRY \ + l.lwz r5,PT_SR(r1) ;\ + l.andi r3,r5,(SPR_SR_IEE|SPR_SR_TEE) ;\ + l.sfeq r5,r0 /* skip trace if irqs were already off */;\ + l.bf 1f ;\ + l.nop ;\ + TRACE_IRQS_SAVE(r4,trace_hardirqs_off) ;\ +1: +#else +#define TRACE_IRQS_OFF +#define TRACE_IRQS_ON +#define TRACE_IRQS_OFF_ENTRY +#define TRACE_IRQS_ON_SYSCALL +#endif + +/* + * We need to disable interrupts at beginning of RESTORE_ALL + * since interrupt might come in after we've loaded EPC return address + * and overwrite EPC with address somewhere in RESTORE_ALL + * which is of course wrong! 
+ */ + +#define RESTORE_ALL \ + DISABLE_INTERRUPTS(r3,r4) ;\ + l.lwz r3,PT_PC(r1) ;\ + l.mtspr r0,r3,SPR_EPCR_BASE ;\ + l.lwz r3,PT_SR(r1) ;\ + l.mtspr r0,r3,SPR_ESR_BASE ;\ + l.lwz r2,PT_GPR2(r1) ;\ + l.lwz r3,PT_GPR3(r1) ;\ + l.lwz r4,PT_GPR4(r1) ;\ + l.lwz r5,PT_GPR5(r1) ;\ + l.lwz r6,PT_GPR6(r1) ;\ + l.lwz r7,PT_GPR7(r1) ;\ + l.lwz r8,PT_GPR8(r1) ;\ + l.lwz r9,PT_GPR9(r1) ;\ + l.lwz r10,PT_GPR10(r1) ;\ + l.lwz r11,PT_GPR11(r1) ;\ + l.lwz r12,PT_GPR12(r1) ;\ + l.lwz r13,PT_GPR13(r1) ;\ + l.lwz r14,PT_GPR14(r1) ;\ + l.lwz r15,PT_GPR15(r1) ;\ + l.lwz r16,PT_GPR16(r1) ;\ + l.lwz r17,PT_GPR17(r1) ;\ + l.lwz r18,PT_GPR18(r1) ;\ + l.lwz r19,PT_GPR19(r1) ;\ + l.lwz r20,PT_GPR20(r1) ;\ + l.lwz r21,PT_GPR21(r1) ;\ + l.lwz r22,PT_GPR22(r1) ;\ + l.lwz r23,PT_GPR23(r1) ;\ + l.lwz r24,PT_GPR24(r1) ;\ + l.lwz r25,PT_GPR25(r1) ;\ + l.lwz r26,PT_GPR26(r1) ;\ + l.lwz r27,PT_GPR27(r1) ;\ + l.lwz r28,PT_GPR28(r1) ;\ + l.lwz r29,PT_GPR29(r1) ;\ + l.lwz r30,PT_GPR30(r1) ;\ + l.lwz r31,PT_GPR31(r1) ;\ + l.lwz r1,PT_SP(r1) ;\ + l.rfe + + +#define EXCEPTION_ENTRY(handler) \ + .global handler ;\ +handler: ;\ + /* r1, EPCR, ESR a already saved */ ;\ + l.sw PT_GPR2(r1),r2 ;\ + l.sw PT_GPR3(r1),r3 ;\ + /* r4 already save */ ;\ + l.sw PT_GPR5(r1),r5 ;\ + l.sw PT_GPR6(r1),r6 ;\ + l.sw PT_GPR7(r1),r7 ;\ + l.sw PT_GPR8(r1),r8 ;\ + l.sw PT_GPR9(r1),r9 ;\ + /* r10 already saved */ ;\ + l.sw PT_GPR11(r1),r11 ;\ + /* r12 already saved */ ;\ + l.sw PT_GPR13(r1),r13 ;\ + l.sw PT_GPR14(r1),r14 ;\ + l.sw PT_GPR15(r1),r15 ;\ + l.sw PT_GPR16(r1),r16 ;\ + l.sw PT_GPR17(r1),r17 ;\ + l.sw PT_GPR18(r1),r18 ;\ + l.sw PT_GPR19(r1),r19 ;\ + l.sw PT_GPR20(r1),r20 ;\ + l.sw PT_GPR21(r1),r21 ;\ + l.sw PT_GPR22(r1),r22 ;\ + l.sw PT_GPR23(r1),r23 ;\ + l.sw PT_GPR24(r1),r24 ;\ + l.sw PT_GPR25(r1),r25 ;\ + l.sw PT_GPR26(r1),r26 ;\ + l.sw PT_GPR27(r1),r27 ;\ + l.sw PT_GPR28(r1),r28 ;\ + l.sw PT_GPR29(r1),r29 ;\ + /* r30 already save */ ;\ +/* l.sw PT_GPR30(r1),r30*/ ;\ + l.sw PT_GPR31(r1),r31 ;\ + TRACE_IRQS_OFF_ENTRY ;\ + /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ + l.addi r30,r0,-1 ;\ + l.sw PT_ORIG_GPR11(r1),r30 + +#define UNHANDLED_EXCEPTION(handler,vector) \ + .global handler ;\ +handler: ;\ + /* r1, EPCR, ESR already saved */ ;\ + l.sw PT_GPR2(r1),r2 ;\ + l.sw PT_GPR3(r1),r3 ;\ + l.sw PT_GPR5(r1),r5 ;\ + l.sw PT_GPR6(r1),r6 ;\ + l.sw PT_GPR7(r1),r7 ;\ + l.sw PT_GPR8(r1),r8 ;\ + l.sw PT_GPR9(r1),r9 ;\ + /* r10 already saved */ ;\ + l.sw PT_GPR11(r1),r11 ;\ + /* r12 already saved */ ;\ + l.sw PT_GPR13(r1),r13 ;\ + l.sw PT_GPR14(r1),r14 ;\ + l.sw PT_GPR15(r1),r15 ;\ + l.sw PT_GPR16(r1),r16 ;\ + l.sw PT_GPR17(r1),r17 ;\ + l.sw PT_GPR18(r1),r18 ;\ + l.sw PT_GPR19(r1),r19 ;\ + l.sw PT_GPR20(r1),r20 ;\ + l.sw PT_GPR21(r1),r21 ;\ + l.sw PT_GPR22(r1),r22 ;\ + l.sw PT_GPR23(r1),r23 ;\ + l.sw PT_GPR24(r1),r24 ;\ + l.sw PT_GPR25(r1),r25 ;\ + l.sw PT_GPR26(r1),r26 ;\ + l.sw PT_GPR27(r1),r27 ;\ + l.sw PT_GPR28(r1),r28 ;\ + l.sw PT_GPR29(r1),r29 ;\ + /* r31 already saved */ ;\ + l.sw PT_GPR30(r1),r30 ;\ +/* l.sw PT_GPR31(r1),r31 */ ;\ + /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ + l.addi r30,r0,-1 ;\ + l.sw PT_ORIG_GPR11(r1),r30 ;\ + l.addi r3,r1,0 ;\ + /* r4 is exception EA */ ;\ + l.addi r5,r0,vector ;\ + l.jal unhandled_exception ;\ + l.nop ;\ + l.j _ret_from_exception ;\ + l.nop + +/* clobbers 'reg' */ +#define CLEAR_LWA_FLAG(reg) \ + l.movhi reg,hi(lwa_flag) ;\ + l.ori reg,reg,lo(lwa_flag) ;\ + l.sw 0(reg),r0 +/* + * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR + * contain the same values as when 
the exception we're handling
+ * occurred. In fact they never do. If you need them, use the
+ * values saved on the stack (for SPR_EPC, SPR_ESR) or the content
+ * of r4 (for SPR_EEAR). For details look at EXCEPTION_HANDLE()
+ * in 'arch/openrisc/kernel/head.S'
+ */
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_tng_kernel_start)
+	l.jal	_start
+	 l.andi r0,r0,0
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+
+EXCEPTION_ENTRY(_bus_fault_handler)
+	CLEAR_LWA_FLAG(r3)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_bus_fault
+	 l.addi  r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_exception
+	 l.nop
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler)
+	CLEAR_LWA_FLAG(r3)
+	l.and	r5,r5,r0
+	l.j	1f
+	 l.nop
+
+EXCEPTION_ENTRY(_data_page_fault_handler)
+	CLEAR_LWA_FLAG(r3)
+	/* set up parameters for do_page_fault */
+	l.ori	r5,r0,0x300		   // exception vector
+1:
+	l.addi  r3,r1,0                    // pt_regs
+	/* r4 set by EXCEPTION_HANDLE */   // effective address of fault
+
+#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
+	l.lwz   r6,PT_PC(r3)               // address of an offending insn
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+
+	l.srli  r6,r6,26                   // check opcode for jump insn
+	l.sfeqi r6,0                       // l.j
+	l.bf    8f
+	l.sfeqi r6,1                       // l.jal
+	l.bf    8f
+	l.sfeqi r6,3                       // l.bnf
+	l.bf    8f
+	l.sfeqi r6,4                       // l.bf
+	l.bf    8f
+	l.sfeqi r6,0x11                    // l.jr
+	l.bf    8f
+	l.sfeqi r6,0x12                    // l.jalr
+	l.bf    8f
+	 l.nop
+
+	l.j     9f
+	 l.nop
+
+8: // offending insn is in delay slot
+	l.lwz   r6,PT_PC(r3)               // address of an offending insn
+	l.addi  r6,r6,4
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+	l.srli  r6,r6,26                   // get opcode
+9: // offending instruction opcode loaded in r6
+
+#else
+
+	l.mfspr r6,r0,SPR_SR               // SR
+	l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
+	l.sfne  r6,r0                      // exception happened in delay slot
+	l.bnf   7f
+	 l.lwz  r6,PT_PC(r3)               // address of an offending insn
+
+	l.addi	r6,r6,4                    // offending insn is in delay slot
+7:
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+	l.srli  r6,r6,26                   // check opcode for write access
+#endif
+
+	l.sfgeui r6,0x33                   // check opcode for write access
+	l.bnf   1f
+	l.sfleui r6,0x37
+	l.bnf   1f
+	l.ori   r6,r0,0x1                  // write access
+	l.j     2f
+	 l.nop
+1:	l.ori   r6,r0,0x0                  // !write access
+2:
+
+	/* call fault.c handler in or32/mm/fault.c */
+	l.jal   do_page_fault
+	 l.nop
+	l.j     _ret_from_exception
+	 l.nop
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_itlb_miss_page_fault_handler)
+	CLEAR_LWA_FLAG(r3)
+	l.and	r5,r5,r0
+	l.j	1f
+	 l.nop
+
+EXCEPTION_ENTRY(_insn_page_fault_handler)
+	CLEAR_LWA_FLAG(r3)
+	/* set up parameters for do_page_fault */
+	l.ori	r5,r0,0x400		   // exception vector
+1:
+	l.addi  r3,r1,0                    // pt_regs
+	/* r4 set by EXCEPTION_HANDLE */   // effective address of fault
+	l.ori	r6,r0,0x0		   // !write access
+
+	/* call fault.c handler in or32/mm/fault.c */
+	l.jal   do_page_fault
+	 l.nop
+	l.j     _ret_from_exception
+	 l.nop
+
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_timer_handler)
+	CLEAR_LWA_FLAG(r3)
+	l.jal   timer_interrupt
+	 l.addi r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_intr
+	 l.nop
+
+/* ---[ 0x600: Alignment exception ]-------------------------------------- */
+
+EXCEPTION_ENTRY(_alignment_handler)
+	CLEAR_LWA_FLAG(r3)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_unaligned_access
+
l.addi r3,r1,0 /* pt_regs */ + + l.j _ret_from_exception + l.nop + +#if 0 +EXCEPTION_ENTRY(_alignment_handler) +// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */ + l.addi r2,r4,0 +// l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */ + l.lwz r5,PT_PC(r1) + + l.lwz r3,0(r5) /* Load insn */ + l.srli r4,r3,26 /* Shift left to get the insn opcode */ + + l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */ + l.bf jmp + l.sfeqi r4,0x01 + l.bf jmp + l.sfeqi r4,0x03 + l.bf jmp + l.sfeqi r4,0x04 + l.bf jmp + l.sfeqi r4,0x11 + l.bf jr + l.sfeqi r4,0x12 + l.bf jr + l.nop + l.j 1f + l.addi r5,r5,4 /* Increment PC to get return insn address */ + +jmp: + l.slli r4,r3,6 /* Get the signed extended jump length */ + l.srai r4,r4,4 + + l.lwz r3,4(r5) /* Load the real load/store insn */ + + l.add r5,r5,r4 /* Calculate jump target address */ + + l.j 1f + l.srli r4,r3,26 /* Shift left to get the insn opcode */ + +jr: + l.slli r4,r3,9 /* Shift to get the reg nb */ + l.andi r4,r4,0x7c + + l.lwz r3,4(r5) /* Load the real load/store insn */ + + l.add r4,r4,r1 /* Load the jump register value from the stack */ + l.lwz r5,0(r4) + + l.srli r4,r3,26 /* Shift left to get the insn opcode */ + + +1: +// l.mtspr r0,r5,SPR_EPCR_BASE + l.sw PT_PC(r1),r5 + + l.sfeqi r4,0x26 + l.bf lhs + l.sfeqi r4,0x25 + l.bf lhz + l.sfeqi r4,0x22 + l.bf lws + l.sfeqi r4,0x21 + l.bf lwz + l.sfeqi r4,0x37 + l.bf sh + l.sfeqi r4,0x35 + l.bf sw + l.nop + +1: l.j 1b /* I don't know what to do */ + l.nop + +lhs: l.lbs r5,0(r2) + l.slli r5,r5,8 + l.lbz r6,1(r2) + l.or r5,r5,r6 + l.srli r4,r3,19 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.j align_end + l.sw 0(r4),r5 + +lhz: l.lbz r5,0(r2) + l.slli r5,r5,8 + l.lbz r6,1(r2) + l.or r5,r5,r6 + l.srli r4,r3,19 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.j align_end + l.sw 0(r4),r5 + +lws: l.lbs r5,0(r2) + l.slli r5,r5,24 + l.lbz r6,1(r2) + l.slli r6,r6,16 + l.or r5,r5,r6 + l.lbz r6,2(r2) + l.slli r6,r6,8 + l.or r5,r5,r6 + l.lbz r6,3(r2) + l.or r5,r5,r6 + l.srli r4,r3,19 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.j align_end + l.sw 0(r4),r5 + +lwz: l.lbz r5,0(r2) + l.slli r5,r5,24 + l.lbz r6,1(r2) + l.slli r6,r6,16 + l.or r5,r5,r6 + l.lbz r6,2(r2) + l.slli r6,r6,8 + l.or r5,r5,r6 + l.lbz r6,3(r2) + l.or r5,r5,r6 + l.srli r4,r3,19 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.j align_end + l.sw 0(r4),r5 + +sh: + l.srli r4,r3,9 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.lwz r5,0(r4) + l.sb 1(r2),r5 + l.srli r5,r5,8 + l.j align_end + l.sb 0(r2),r5 + +sw: + l.srli r4,r3,9 + l.andi r4,r4,0x7c + l.add r4,r4,r1 + l.lwz r5,0(r4) + l.sb 3(r2),r5 + l.srli r5,r5,8 + l.sb 2(r2),r5 + l.srli r5,r5,8 + l.sb 1(r2),r5 + l.srli r5,r5,8 + l.j align_end + l.sb 0(r2),r5 + +align_end: + l.j _ret_from_intr + l.nop +#endif + +/* ---[ 0x700: Illegal insn exception ]---------------------------------- */ + +EXCEPTION_ENTRY(_illegal_instruction_handler) + /* r4: EA of fault (set by EXCEPTION_HANDLE) */ + l.jal do_illegal_instruction + l.addi r3,r1,0 /* pt_regs */ + + l.j _ret_from_exception + l.nop + +/* ---[ 0x800: External interrupt exception ]---------------------------- */ + +EXCEPTION_ENTRY(_external_irq_handler) +#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK + l.lwz r4,PT_SR(r1) // were interrupts enabled ? + l.andi r4,r4,SPR_SR_IEE + l.sfeqi r4,0 + l.bnf 1f // ext irq enabled, all ok. 
+	 l.nop
+
+#ifdef CONFIG_PRINTK
+	l.addi  r1,r1,-0x8
+	l.movhi r3,hi(42f)
+	l.ori	r3,r3,lo(42f)
+	l.sw    0x0(r1),r3
+	l.jal   printk
+	 l.sw   0x4(r1),r4
+	l.addi  r1,r1,0x8
+
+	.section .rodata, "a"
+42:
+		.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+		.align 4
+	.previous
+#endif
+
+	l.ori	r4,r4,SPR_SR_IEE	// fix the bug
+//	l.sw	PT_SR(r1),r4
+1:
+#endif
+	CLEAR_LWA_FLAG(r3)
+	l.addi	r3,r1,0
+	l.movhi	r8,hi(do_IRQ)
+	l.ori	r8,r8,lo(do_IRQ)
+	l.jalr r8
+	 l.nop
+	l.j    _ret_from_intr
+	 l.nop
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+
+/*
+ * Syscalls are a special type of exception in that they are
+ * _explicitly_ invoked by userspace and can therefore be
+ * held to conform to the same ABI as normal functions with
+ * respect to whether registers are preserved across the call
+ * or not.
+ */
+
+/* Upon syscall entry we just save the callee-saved registers
+ * and not the call-clobbered ones.
+ */
+
+_string_syscall_return:
+	.string "syscall return %ld \n\r\0"
+	.align 4
+
+ENTRY(_sys_call_handler)
+	/* r1, EPCR, ESR are already saved */
+	l.sw	PT_GPR2(r1),r2
+	/* r3-r8 must be saved because syscall restart relies
+	 * on us being able to restore the syscall args... technically
+	 * they would otherwise be clobbered
+	 */
+	l.sw    PT_GPR3(r1),r3
+	/*
+	 * r4 already saved
+	 * r4 holds the EEAR address of the fault, use it as scratch reg and
+	 * then load the original r4
+	 */
+	CLEAR_LWA_FLAG(r4)
+	l.lwz	r4,PT_GPR4(r1)
+	l.sw    PT_GPR5(r1),r5
+	l.sw    PT_GPR6(r1),r6
+	l.sw    PT_GPR7(r1),r7
+	l.sw    PT_GPR8(r1),r8
+	l.sw    PT_GPR9(r1),r9
+	/* r10 already saved */
+	l.sw    PT_GPR11(r1),r11
+	/* orig_gpr11 must be set for syscalls */
+	l.sw    PT_ORIG_GPR11(r1),r11
+	/* r12,r13 already saved */
+
+	/* r14-r28 (even) aren't touched by the syscall fast path below
+	 * so we don't need to save them. However, the functions that return
+	 * to userspace via a call to switch() DO need to save these because
+	 * switch() effectively clobbers them... saving these registers for
+	 * such functions is handled in their syscall wrappers (see fork, vfork,
+	 * and clone, below).
+	 */
+
+	/* r30 is the only register we clobber in the fast path */
+	/* r30 already saved */
+/*	l.sw    PT_GPR30(r1),r30 */
+
+_syscall_check_trace_enter:
+	/* syscalls run with interrupts enabled */
+	TRACE_IRQS_ON_SYSCALL
+	ENABLE_INTERRUPTS(r29)		// enable interrupts, r29 is temp
+
+	/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
+	l.lwz	r30,TI_FLAGS(r10)
+	l.andi	r30,r30,_TIF_SYSCALL_TRACE
+	l.sfne	r30,r0
+	l.bf	_syscall_trace_enter
+	 l.nop
+
+_syscall_check:
+	/* Ensure that the syscall number is reasonable */
+	l.sfgeui r11,__NR_syscalls
+	l.bf	_syscall_badsys
+	 l.nop
+
+_syscall_call:
+	l.movhi r29,hi(sys_call_table)
+	l.ori   r29,r29,lo(sys_call_table)
+	l.slli  r11,r11,2
+	l.add   r29,r29,r11
+	l.lwz   r29,0(r29)
+
+	l.jalr  r29
+	 l.nop
+
+_syscall_return:
+	/* All syscalls return here... just pay attention to ret_from_fork
+	 * which does it in a roundabout way.
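+	 *
+	 * (For reference: userspace enters via l.sys with the syscall
+	 * number in r11 and the arguments in r3-r8, and reads the result
+	 * back from r11; that is also why orig_gpr11 is saved above for
+	 * restart handling.)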
+	 */
+	l.sw    PT_GPR11(r1),r11           // save return value
+
+#if 0
+_syscall_debug:
+	l.movhi r3,hi(_string_syscall_return)
+	l.ori   r3,r3,lo(_string_syscall_return)
+	l.ori   r27,r0,1
+	l.sw    -4(r1),r27
+	l.sw    -8(r1),r11
+	l.addi  r1,r1,-8
+	l.movhi r27,hi(printk)
+	l.ori   r27,r27,lo(printk)
+	l.jalr  r27
+	 l.nop
+	l.addi  r1,r1,8
+#endif
+
+_syscall_check_trace_leave:
+	/* r30 is a callee-saved register so this should still hold the
+	 * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
+	 * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
+	 */
+	l.sfne	r30,r0
+	l.bf	_syscall_trace_leave
+	 l.nop
+
+/* This is where the exception-return code begins... interrupts need to be
+ * disabled the rest of the way here because we can't afford to miss any
+ * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */
+
+_syscall_check_work:
+	/* Here we need to disable interrupts */
+	DISABLE_INTERRUPTS(r27,r29)
+	TRACE_IRQS_OFF
+	l.lwz	r30,TI_FLAGS(r10)
+	l.andi	r30,r30,_TIF_WORK_MASK
+	l.sfne	r30,r0
+
+	l.bnf	_syscall_resume_userspace
+	 l.nop
+
+	/* Work pending follows a different return path, so we need to
+	 * make sure that all the call-saved registers get into pt_regs
+	 * before branching...
+	 */
+	l.sw    PT_GPR14(r1),r14
+	l.sw    PT_GPR16(r1),r16
+	l.sw    PT_GPR18(r1),r18
+	l.sw    PT_GPR20(r1),r20
+	l.sw    PT_GPR22(r1),r22
+	l.sw    PT_GPR24(r1),r24
+	l.sw    PT_GPR26(r1),r26
+	l.sw    PT_GPR28(r1),r28
+
+	/* _work_pending needs to be called with interrupts disabled */
+	l.j	_work_pending
+	 l.nop
+
+_syscall_resume_userspace:
+//	ENABLE_INTERRUPTS(r29)
+
+
+/* This is the hot path for returning to userspace from a syscall. If there's
+ * work to be done and the branch to _work_pending was taken above, then the
+ * return to userspace will be done via the normal exception return path...
+ * that path restores _all_ registers and will overwrite the "clobbered"
+ * registers with whatever garbage is in pt_regs -- that's OK because those
+ * registers are clobbered anyway and because the extra work is insignificant
+ * in the context of the extra work that _work_pending is doing.
+ */
+
+/* Once again, syscalls are special and only guarantee to preserve the
+ * same registers as a normal function call */
+
+/* The assumption here is that the registers r14-r28 (even) are untouched and
+ * don't need to be restored... be sure that that's really the case!
+ */
+
+/* This is still too much... we should only be restoring what we actually
+ * clobbered... we should even be using 'scratch' (odd) regs above so that
+ * we don't need to restore anything, hardly...
+ */
+
+	l.lwz	r2,PT_GPR2(r1)
+
+	/* Restore args */
+	/* r3-r8 are technically clobbered, but syscall restart needs these
+	 * to be restored...
+	 */
+	l.lwz	r3,PT_GPR3(r1)
+	l.lwz	r4,PT_GPR4(r1)
+	l.lwz	r5,PT_GPR5(r1)
+	l.lwz	r6,PT_GPR6(r1)
+	l.lwz	r7,PT_GPR7(r1)
+	l.lwz	r8,PT_GPR8(r1)
+
+	l.lwz	r9,PT_GPR9(r1)
+	l.lwz	r10,PT_GPR10(r1)
+	l.lwz	r11,PT_GPR11(r1)
+
+	/* r30 is the only register we clobber in the fast path */
+	l.lwz	r30,PT_GPR30(r1)
+
+	/* Here we use r13-r19 (odd) as scratch regs */
+	l.lwz	r13,PT_PC(r1)
+	l.lwz	r15,PT_SR(r1)
+	l.lwz	r1,PT_SP(r1)
+	/* Interrupts need to be disabled for setting EPCR and ESR
+	 * so that another interrupt doesn't come in here and clobber
+	 * them before we can use them for our l.rfe */
+	DISABLE_INTERRUPTS(r17,r19)
+	l.mtspr	r0,r13,SPR_EPCR_BASE
+	l.mtspr	r0,r15,SPR_ESR_BASE
+	l.rfe
+
+/* End of hot path!
+ * Keep the below tracing and error handling out of the hot path...
+*/ + +_syscall_trace_enter: + /* Here we pass pt_regs to do_syscall_trace_enter. Make sure + * that function is really getting all the info it needs as + * pt_regs isn't a complete set of userspace regs, just the + * ones relevant to the syscall... + * + * Note use of delay slot for setting argument. + */ + l.jal do_syscall_trace_enter + l.addi r3,r1,0 + + /* Restore arguments (not preserved across do_syscall_trace_enter) + * so that we can do the syscall for real and return to the syscall + * hot path. + */ + l.lwz r11,PT_GPR11(r1) + l.lwz r3,PT_GPR3(r1) + l.lwz r4,PT_GPR4(r1) + l.lwz r5,PT_GPR5(r1) + l.lwz r6,PT_GPR6(r1) + l.lwz r7,PT_GPR7(r1) + + l.j _syscall_check + l.lwz r8,PT_GPR8(r1) + +_syscall_trace_leave: + l.jal do_syscall_trace_leave + l.addi r3,r1,0 + + l.j _syscall_check_work + l.nop + +_syscall_badsys: + /* Here we effectively pretend to have executed an imaginary + * syscall that returns -ENOSYS and then return to the regular + * syscall hot path. + * Note that "return value" is set in the delay slot... + */ + l.j _syscall_return + l.addi r11,r0,-ENOSYS + +/******* END SYSCALL HANDLING *******/ + +/* ---[ 0xd00: Trap exception ]------------------------------------------ */ + +UNHANDLED_EXCEPTION(_vector_0xd00,0xd00) + +/* ---[ 0xe00: Trap exception ]------------------------------------------ */ + +EXCEPTION_ENTRY(_trap_handler) + CLEAR_LWA_FLAG(r3) + /* r4: EA of fault (set by EXCEPTION_HANDLE) */ + l.jal do_trap + l.addi r3,r1,0 /* pt_regs */ + + l.j _ret_from_exception + l.nop + +/* ---[ 0xf00: Reserved exception ]-------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0xf00,0xf00) + +/* ---[ 0x1000: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1000,0x1000) + +/* ---[ 0x1100: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1100,0x1100) + +/* ---[ 0x1200: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1200,0x1200) + +/* ---[ 0x1300: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1300,0x1300) + +/* ---[ 0x1400: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1400,0x1400) + +/* ---[ 0x1500: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1500,0x1500) + +/* ---[ 0x1600: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1600,0x1600) + +/* ---[ 0x1700: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1700,0x1700) + +/* ---[ 0x1800: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1800,0x1800) + +/* ---[ 0x1900: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1900,0x1900) + +/* ---[ 0x1a00: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00) + +/* ---[ 0x1b00: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00) + +/* ---[ 0x1c00: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00) + +/* ---[ 0x1d00: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00) + +/* ---[ 0x1e00: Reserved exception ]------------------------------------- */ + +UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00) + +/* ---[ 0x1f00: 
Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+
+/* ========================================================[ return ] === */
+
+_resume_userspace:
+	DISABLE_INTERRUPTS(r3,r4)
+	TRACE_IRQS_OFF
+	l.lwz	r4,TI_FLAGS(r10)
+	l.andi	r13,r4,_TIF_WORK_MASK
+	l.sfeqi	r13,0
+	l.bf	_restore_all
+	l.nop
+
+_work_pending:
+	l.lwz	r5,PT_ORIG_GPR11(r1)
+	l.sfltsi r5,0
+	l.bnf	1f
+	l.nop
+	l.andi	r5,r5,0
+1:
+	l.jal	do_work_pending
+	l.ori	r3,r1,0			/* pt_regs */
+
+	l.sfeqi	r11,0
+	l.bf	_restore_all
+	l.nop
+	l.sfltsi r11,0
+	l.bnf	1f
+	l.nop
+	l.and	r11,r11,r0
+	l.ori	r11,r11,__NR_restart_syscall
+	l.j	_syscall_check_trace_enter
+	l.nop
+1:
+	l.lwz	r11,PT_ORIG_GPR11(r1)
+	/* Restore arg registers */
+	l.lwz	r3,PT_GPR3(r1)
+	l.lwz	r4,PT_GPR4(r1)
+	l.lwz	r5,PT_GPR5(r1)
+	l.lwz	r6,PT_GPR6(r1)
+	l.lwz	r7,PT_GPR7(r1)
+	l.j	_syscall_check_trace_enter
+	l.lwz	r8,PT_GPR8(r1)
+
+_restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	l.lwz	r4,PT_SR(r1)
+	l.andi	r3,r4,(SPR_SR_IEE|SPR_SR_TEE)
+	l.sfeq	r3,r0		/* skip trace if irqs were off */
+	l.bf	skip_hardirqs_on
+	l.nop
+	TRACE_IRQS_ON
+skip_hardirqs_on:
+#endif
+	RESTORE_ALL
+	/* This returns to userspace code */
+
+
+ENTRY(_ret_from_intr)
+ENTRY(_ret_from_exception)
+	l.lwz	r4,PT_SR(r1)
+	l.andi	r3,r4,SPR_SR_SM
+	l.sfeqi	r3,0
+	l.bnf	_restore_all
+	l.nop
+	l.j	_resume_userspace
+	l.nop
+
+ENTRY(ret_from_fork)
+	l.jal	schedule_tail
+	l.nop
+
+	/* Check if we are a kernel thread */
+	l.sfeqi	r20,0
+	l.bf	1f
+	l.nop
+
+	/* ...we are a kernel thread so invoke the requested callback */
+	l.jalr	r20
+	l.or	r3,r22,r0
+
+1:
+	/* _syscall_return expects r11 to contain the return value */
+	l.lwz	r11,PT_GPR11(r1)
+
+	/* The syscall fast path return expects call-saved registers
+	 * r12-r28 to be untouched, so we restore them here as they
+	 * will have been effectively clobbered when arriving here
+	 * via the call to _switch()
+	 */
+	l.lwz	r12,PT_GPR12(r1)
+	l.lwz	r14,PT_GPR14(r1)
+	l.lwz	r16,PT_GPR16(r1)
+	l.lwz	r18,PT_GPR18(r1)
+	l.lwz	r20,PT_GPR20(r1)
+	l.lwz	r22,PT_GPR22(r1)
+	l.lwz	r24,PT_GPR24(r1)
+	l.lwz	r26,PT_GPR26(r1)
+	l.lwz	r28,PT_GPR28(r1)
+
+	l.j	_syscall_return
+	l.nop
+
+/* ========================================================[ switch ] === */
+
+/*
+ * This routine switches between two different tasks.  The process
+ * state of one is saved on its kernel stack.  Then the state
+ * of the other is restored from its kernel stack.  The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via the 'return'.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path.  If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ */
+
+
+/* _switch MUST never lie on a page boundary, because it runs from
+ * effective addresses and being interrupted by an iTLB miss would kill it.
+ * A dTLB miss seems never to occur in the bad place since data accesses
+ * are from task structures, which are always page aligned.
+ *
+ * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
+ * register, then load the previous register values and only at the end call
+ * the l.rfe instruction.  If we get a TLB miss in between, the EPCR register
+ * gets garbled and we end up calling l.rfe with the wrong EPCR (the same
+ * probably holds for ESR).
+ *
+ * To avoid these problems it is sufficient to align _switch to
+ * some nice round number smaller than its size...
+ */
+
+/* ABI rules apply here... we either enter _switch via schedule() or via
+ * an imaginary call to which we shall return at ret_from_fork.  Either
+ * way, we are a function call and only need to preserve the callee-saved
+ * registers when we return.  As such, we don't need to save to the stack
+ * the registers that we are not required to return unchanged...
+ */
+
+	.align 0x400
+ENTRY(_switch)
+	/* We don't store SR as _switch only gets called in a context where
+	 * the SR will be the same going in and coming out... */
+
+	/* Set up new pt_regs struct for saving task state */
+	l.addi	r1,r1,-(INT_FRAME_SIZE)
+
+	/* No need to store r1/PT_SP as it goes into KSP below */
+	l.sw	PT_GPR2(r1),r2
+	l.sw	PT_GPR9(r1),r9
+	/* This is wrong, r12 shouldn't be here... but GCC is broken for the
+	 * time being and expects r12 to be callee-saved... */
+	l.sw	PT_GPR12(r1),r12
+	l.sw	PT_GPR14(r1),r14
+	l.sw	PT_GPR16(r1),r16
+	l.sw	PT_GPR18(r1),r18
+	l.sw	PT_GPR20(r1),r20
+	l.sw	PT_GPR22(r1),r22
+	l.sw	PT_GPR24(r1),r24
+	l.sw	PT_GPR26(r1),r26
+	l.sw	PT_GPR28(r1),r28
+	l.sw	PT_GPR30(r1),r30
+
+	l.addi	r11,r10,0		/* Save old 'current' to 'last' return value */
+
+	/* We use thread_info->ksp for storing the address of the above
+	 * structure so that we can get back to it later... we don't want
+	 * to lose the value of thread_info->ksp, though, so store it as
+	 * pt_regs->sp so that we can easily restore it when we are made
+	 * live again...
+	 */
+
+	/* Save the old value of thread_info->ksp as pt_regs->sp */
+	l.lwz	r29,TI_KSP(r10)
+	l.sw	PT_SP(r1),r29
+
+	/* Swap kernel stack pointers */
+	l.sw	TI_KSP(r10),r1		/* Save old stack pointer */
+	l.or	r10,r4,r0		/* Set up new current_thread_info */
+	l.lwz	r1,TI_KSP(r10)		/* Load new stack pointer */
+
+	/* Restore the old value of thread_info->ksp */
+	l.lwz	r29,PT_SP(r1)
+	l.sw	TI_KSP(r10),r29
+
+	/* ...and restore the registers, except r11 because the return value
+	 * has already been set above.
+	 */
+	l.lwz	r2,PT_GPR2(r1)
+	l.lwz	r9,PT_GPR9(r1)
+	/* No need to restore r10 */
+	/* ...and do not restore r11 */
+
+	/* This is wrong, r12 shouldn't be here... but GCC is broken for the
+	 * time being and expects r12 to be callee-saved... */
+	l.lwz	r12,PT_GPR12(r1)
+	l.lwz	r14,PT_GPR14(r1)
+	l.lwz	r16,PT_GPR16(r1)
+	l.lwz	r18,PT_GPR18(r1)
+	l.lwz	r20,PT_GPR20(r1)
+	l.lwz	r22,PT_GPR22(r1)
+	l.lwz	r24,PT_GPR24(r1)
+	l.lwz	r26,PT_GPR26(r1)
+	l.lwz	r28,PT_GPR28(r1)
+	l.lwz	r30,PT_GPR30(r1)
+
+	/* Unwind stack to pre-switch state */
+	l.addi	r1,r1,(INT_FRAME_SIZE)
+
+	/* Return via the link-register back to where we 'came from', where
+	 * that may be either schedule(), ret_from_fork(), or
+	 * ret_from_kernel_thread().  If we are returning to a new thread,
+	 * we are expected to have set up the arg to schedule_tail already,
+	 * hence we do so here unconditionally:
+	 */
+	l.lwz	r3,TI_TASK(r3)		/* Load 'prev' as schedule_tail arg */
+	l.jr	r9
+	l.nop
+
+/* ==================================================================== */
+
+/* These all use the delay slot for setting the argument register, so the
+ * jump is always happening after the l.addi instruction.
+ *
+ * These are all just wrappers that don't touch the link-register r9, so the
+ * return from the "real" syscall function will return back to the syscall
+ * code that did the l.jal that brought us here.
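+ *
+ * For example, in the pair used by sys_rt_sigreturn below,
+ *
+ *	l.jal	do_syscall_trace_leave
+ *	l.addi	r3,r1,0
+ *
+ * the l.addi in the delay slot executes before control reaches the
+ * callee, so r3 already carries the pt_regs pointer on entry.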
+ */
+
+/* fork requires that we save all the callee-saved registers because they
+ * are all effectively clobbered by the call to _switch.  Here we store
+ * all the registers that aren't touched by the syscall fast path and thus
+ * weren't saved there.
+ */
+
+_fork_save_extra_regs_and_call:
+	l.sw	PT_GPR14(r1),r14
+	l.sw	PT_GPR16(r1),r16
+	l.sw	PT_GPR18(r1),r18
+	l.sw	PT_GPR20(r1),r20
+	l.sw	PT_GPR22(r1),r22
+	l.sw	PT_GPR24(r1),r24
+	l.sw	PT_GPR26(r1),r26
+	l.jr	r29
+	l.sw	PT_GPR28(r1),r28
+
+ENTRY(__sys_clone)
+	l.movhi	r29,hi(sys_clone)
+	l.ori	r29,r29,lo(sys_clone)
+	l.j	_fork_save_extra_regs_and_call
+	l.nop
+
+ENTRY(__sys_fork)
+	l.movhi	r29,hi(sys_fork)
+	l.ori	r29,r29,lo(sys_fork)
+	l.j	_fork_save_extra_regs_and_call
+	l.nop
+
+ENTRY(sys_rt_sigreturn)
+	l.jal	_sys_rt_sigreturn
+	l.addi	r3,r1,0
+	l.sfne	r30,r0
+	l.bnf	_no_syscall_trace
+	l.nop
+	l.jal	do_syscall_trace_leave
+	l.addi	r3,r1,0
+_no_syscall_trace:
+	l.j	_resume_userspace
+	l.nop
+
+/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
+ * The function takes a variable number of parameters depending on which
+ * particular flavour of atomic you want... parameter 1 is a flag identifying
+ * the atomic in question.  Currently, this function implements the
+ * following variants:
+ *
+ * XCHG:
+ *  @flag: 1
+ *  @ptr1:
+ *  @ptr2:
+ * Atomically exchange the values in pointers 1 and 2.
+ *
+ */
+
+ENTRY(sys_or1k_atomic)
+	/* FIXME: This ignores r3 and always does an XCHG */
+	DISABLE_INTERRUPTS(r17,r19)
+	l.lwz	r29,0(r4)
+	l.lwz	r27,0(r5)
+	l.sw	0(r4),r27
+	l.sw	0(r5),r29
+	ENABLE_INTERRUPTS(r17)
+	l.jr	r9
+	l.or	r11,r0,r0
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
new file mode 100644
index 000000000..d7e49b947
--- /dev/null
+++ b/arch/openrisc/kernel/head.S
@@ -0,0 +1,1757 @@
+/*
+ * OpenRISC head.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */ + +#include <linux/linkage.h> +#include <linux/threads.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/serial_reg.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/thread_info.h> +#include <asm/cache.h> +#include <asm/spr_defs.h> +#include <asm/asm-offsets.h> +#include <linux/of_fdt.h> + +#define tophys(rd,rs) \ + l.movhi rd,hi(-KERNELBASE) ;\ + l.add rd,rd,rs + +#define CLEAR_GPR(gpr) \ + l.movhi gpr,0x0 + +#define LOAD_SYMBOL_2_GPR(gpr,symbol) \ + l.movhi gpr,hi(symbol) ;\ + l.ori gpr,gpr,lo(symbol) + + +#define UART_BASE_ADD 0x90000000 + +#define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM) +#define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM) + +/* ============================================[ tmp store locations ]=== */ + +#define SPR_SHADOW_GPR(x) ((x) + SPR_GPR_BASE + 32) + +/* + * emergency_print temporary stores + */ +#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS +#define EMERGENCY_PRINT_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(14) +#define EMERGENCY_PRINT_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(14) + +#define EMERGENCY_PRINT_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(15) +#define EMERGENCY_PRINT_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(15) + +#define EMERGENCY_PRINT_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(16) +#define EMERGENCY_PRINT_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(16) + +#define EMERGENCY_PRINT_STORE_GPR7 l.mtspr r0,r7,SPR_SHADOW_GPR(7) +#define EMERGENCY_PRINT_LOAD_GPR7 l.mfspr r7,r0,SPR_SHADOW_GPR(7) + +#define EMERGENCY_PRINT_STORE_GPR8 l.mtspr r0,r8,SPR_SHADOW_GPR(8) +#define EMERGENCY_PRINT_LOAD_GPR8 l.mfspr r8,r0,SPR_SHADOW_GPR(8) + +#define EMERGENCY_PRINT_STORE_GPR9 l.mtspr r0,r9,SPR_SHADOW_GPR(9) +#define EMERGENCY_PRINT_LOAD_GPR9 l.mfspr r9,r0,SPR_SHADOW_GPR(9) + +#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ +#define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4 +#define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0) + +#define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5 +#define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0) + +#define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6 +#define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0) + +#define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7 +#define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0) + +#define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8 +#define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0) + +#define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9 +#define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0) + +#endif + +/* + * TLB miss handlers temorary stores + */ +#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS +#define EXCEPTION_STORE_GPR2 l.mtspr r0,r2,SPR_SHADOW_GPR(2) +#define EXCEPTION_LOAD_GPR2 l.mfspr r2,r0,SPR_SHADOW_GPR(2) + +#define EXCEPTION_STORE_GPR3 l.mtspr r0,r3,SPR_SHADOW_GPR(3) +#define EXCEPTION_LOAD_GPR3 l.mfspr r3,r0,SPR_SHADOW_GPR(3) + +#define EXCEPTION_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(4) +#define EXCEPTION_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(4) + +#define EXCEPTION_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(5) +#define EXCEPTION_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(5) + +#define EXCEPTION_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(6) +#define EXCEPTION_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(6) + +#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ +#define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2 +#define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0) + +#define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3 +#define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0) + 
+#define EXCEPTION_STORE_GPR4	l.sw    0x6c(r0),r4
+#define EXCEPTION_LOAD_GPR4	l.lwz   r4,0x6c(r0)
+
+#define EXCEPTION_STORE_GPR5	l.sw    0x70(r0),r5
+#define EXCEPTION_LOAD_GPR5	l.lwz   r5,0x70(r0)
+
+#define EXCEPTION_STORE_GPR6	l.sw    0x74(r0),r6
+#define EXCEPTION_LOAD_GPR6	l.lwz   r6,0x74(r0)
+
+#endif
+
+/*
+ * EXCEPTION_HANDLE temporary stores
+ */
+
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
+#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)
+
+#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
+#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)
+
+#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
+#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)
+
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
+#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
+#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)
+
+#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
+#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)
+
+#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
+#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
+#endif
+
+/* =========================================================[ macros ]=== */
+
+#ifdef CONFIG_SMP
+#define GET_CURRENT_PGD(reg,t1)					\
+	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
+	l.mfspr	t1,r0,SPR_COREID				;\
+	l.slli	t1,t1,2						;\
+	l.add	reg,reg,t1					;\
+	tophys  (t1,reg)					;\
+	l.lwz   reg,0(t1)
+#else
+#define GET_CURRENT_PGD(reg,t1)					\
+	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
+	tophys  (t1,reg)					;\
+	l.lwz   reg,0(t1)
+#endif
+
+/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
+#ifdef CONFIG_SMP
+#define GET_CURRENT_THREAD_INFO					\
+	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
+	tophys  (r30,r1)					;\
+	l.mfspr	r10,r0,SPR_COREID				;\
+	l.slli	r10,r10,2					;\
+	l.add	r30,r30,r10					;\
+	/* r10: current_thread_info  */				;\
+	l.lwz   r10,0(r30)
+#else
+#define GET_CURRENT_THREAD_INFO					\
+	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
+	tophys  (r30,r1)					;\
+	/* r10: current_thread_info  */				;\
+	l.lwz   r10,0(r30)
+#endif
+
+/*
+ * DSCR: this is a common hook for handling exceptions.  It will save
+ *       the needed registers, set up the stack and the pointer to current,
+ *       then jump to the handler while enabling the MMU.
+ *
+ * PRMS: handler - a function to jump to.  It has to save the
+ *                 remaining registers to the kernel stack, call the
+ *                 appropriate arch-independent exception handler,
+ *                 and finally jump to ret_from_except.
+ *
+ * PREQ: unchanged state from the time the exception happened
+ *
+ * POST: SAVES the following registers' original values
+ *       to the newly created exception frame pointed to by r1
+ *
+ *       r1  - ksp       pointing to the new (exception) frame
+ *       r4  - EEAR      exception EA
+ *       r10 - current   pointing to the current_thread_info struct
+ *       r12 - syscall   0, since we didn't come from a syscall
+ *       r30 - handler   address of the handler we'll jump to
+ *
+ *       The handler has to save the remaining registers to the exception
+ *       ksp frame *before* tainting them!
+ *
+ * NOTE: this function is not reentrant per se.  Reentrancy is guaranteed
+ *       by the processor disabling all exceptions/interrupts when an
+ *       exception occurs.
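+ *
+ * As a rough C-like sketch of what the macro below does (an
+ * illustration only, not literal kernel code):
+ *
+ *	if (!(ESR & SPR_SR_SM))		/* we interrupted user mode */
+ *		r1 = current_thread_info->ksp;
+ *	r1 -= INT_FRAME_SIZE;		/* carve out the exception frame */
+ *	save r12, EPCR, ESR, r30, r10, old sp and r4 into the frame;
+ *	r4 = EEAR;			/* fault EA for the handler */
+ *	r12 = 0;			/* not a syscall */
+ *	ESR = EXCEPTION_SR;
+ *	EPCR = handler;
+ *	l.rfe();			/* MMU on, jump to the handler */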
+ * + * OPTM: no need to make it so wasteful to extract ksp when in user mode + */ + +#define EXCEPTION_HANDLE(handler) \ + EXCEPTION_T_STORE_GPR30 ;\ + l.mfspr r30,r0,SPR_ESR_BASE ;\ + l.andi r30,r30,SPR_SR_SM ;\ + l.sfeqi r30,0 ;\ + EXCEPTION_T_STORE_GPR10 ;\ + l.bnf 2f /* kernel_mode */ ;\ + EXCEPTION_T_STORE_SP /* delay slot */ ;\ +1: /* user_mode: */ ;\ + GET_CURRENT_THREAD_INFO ;\ + tophys (r30,r10) ;\ + l.lwz r1,(TI_KSP)(r30) ;\ + /* fall through */ ;\ +2: /* kernel_mode: */ ;\ + /* create new stack frame, save only needed gprs */ ;\ + /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\ + /* r12: temp, syscall indicator */ ;\ + l.addi r1,r1,-(INT_FRAME_SIZE) ;\ + /* r1 is KSP, r30 is __pa(KSP) */ ;\ + tophys (r30,r1) ;\ + l.sw PT_GPR12(r30),r12 ;\ + /* r4 use for tmp before EA */ ;\ + l.mfspr r12,r0,SPR_EPCR_BASE ;\ + l.sw PT_PC(r30),r12 ;\ + l.mfspr r12,r0,SPR_ESR_BASE ;\ + l.sw PT_SR(r30),r12 ;\ + /* save r30 */ ;\ + EXCEPTION_T_LOAD_GPR30(r12) ;\ + l.sw PT_GPR30(r30),r12 ;\ + /* save r10 as was prior to exception */ ;\ + EXCEPTION_T_LOAD_GPR10(r12) ;\ + l.sw PT_GPR10(r30),r12 ;\ + /* save PT_SP as was prior to exception */ ;\ + EXCEPTION_T_LOAD_SP(r12) ;\ + l.sw PT_SP(r30),r12 ;\ + /* save exception r4, set r4 = EA */ ;\ + l.sw PT_GPR4(r30),r4 ;\ + l.mfspr r4,r0,SPR_EEAR_BASE ;\ + /* r12 == 1 if we come from syscall */ ;\ + CLEAR_GPR(r12) ;\ + /* ----- turn on MMU ----- */ ;\ + /* Carry DSX into exception SR */ ;\ + l.mfspr r30,r0,SPR_SR ;\ + l.andi r30,r30,SPR_SR_DSX ;\ + l.ori r30,r30,(EXCEPTION_SR) ;\ + l.mtspr r0,r30,SPR_ESR_BASE ;\ + /* r30: EA address of handler */ ;\ + LOAD_SYMBOL_2_GPR(r30,handler) ;\ + l.mtspr r0,r30,SPR_EPCR_BASE ;\ + l.rfe + +/* + * this doesn't work + * + * + * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION + * #define UNHANDLED_EXCEPTION(handler) \ + * l.ori r3,r0,0x1 ;\ + * l.mtspr r0,r3,SPR_SR ;\ + * l.movhi r3,hi(0xf0000100) ;\ + * l.ori r3,r3,lo(0xf0000100) ;\ + * l.jr r3 ;\ + * l.nop 1 + * + * #endif + */ + +/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just + * a bit more carefull (if we have a PT_SP or current pointer + * corruption) and set them up from 'current_set' + * + */ +#define UNHANDLED_EXCEPTION(handler) \ + EXCEPTION_T_STORE_GPR30 ;\ + EXCEPTION_T_STORE_GPR10 ;\ + EXCEPTION_T_STORE_SP ;\ + /* temporary store r3, r9 into r1, r10 */ ;\ + l.addi r1,r3,0x0 ;\ + l.addi r10,r9,0x0 ;\ + /* the string referenced by r3 must be low enough */ ;\ + l.jal _emergency_print ;\ + l.ori r3,r0,lo(_string_unhandled_exception) ;\ + l.mfspr r3,r0,SPR_NPC ;\ + l.jal _emergency_print_nr ;\ + l.andi r3,r3,0x1f00 ;\ + /* the string referenced by r3 must be low enough */ ;\ + l.jal _emergency_print ;\ + l.ori r3,r0,lo(_string_epc_prefix) ;\ + l.jal _emergency_print_nr ;\ + l.mfspr r3,r0,SPR_EPCR_BASE ;\ + l.jal _emergency_print ;\ + l.ori r3,r0,lo(_string_nl) ;\ + /* end of printing */ ;\ + l.addi r3,r1,0x0 ;\ + l.addi r9,r10,0x0 ;\ + /* extract current, ksp from current_set */ ;\ + LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\ + LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\ + /* create new stack frame, save only needed gprs */ ;\ + /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\ + /* r12: temp, syscall indicator, r13 temp */ ;\ + l.addi r1,r1,-(INT_FRAME_SIZE) ;\ + /* r1 is KSP, r30 is __pa(KSP) */ ;\ + tophys (r30,r1) ;\ + l.sw PT_GPR12(r30),r12 ;\ + l.mfspr r12,r0,SPR_EPCR_BASE ;\ + l.sw PT_PC(r30),r12 ;\ + l.mfspr r12,r0,SPR_ESR_BASE ;\ + l.sw PT_SR(r30),r12 ;\ + /* save r31 */ ;\ + EXCEPTION_T_LOAD_GPR30(r12) ;\ + l.sw PT_GPR30(r30),r12 ;\ + /* 
save r10 as was prior to exception */ ;\ + EXCEPTION_T_LOAD_GPR10(r12) ;\ + l.sw PT_GPR10(r30),r12 ;\ + /* save PT_SP as was prior to exception */ ;\ + EXCEPTION_T_LOAD_SP(r12) ;\ + l.sw PT_SP(r30),r12 ;\ + l.sw PT_GPR13(r30),r13 ;\ + /* --> */ ;\ + /* save exception r4, set r4 = EA */ ;\ + l.sw PT_GPR4(r30),r4 ;\ + l.mfspr r4,r0,SPR_EEAR_BASE ;\ + /* r12 == 1 if we come from syscall */ ;\ + CLEAR_GPR(r12) ;\ + /* ----- play a MMU trick ----- */ ;\ + l.ori r30,r0,(EXCEPTION_SR) ;\ + l.mtspr r0,r30,SPR_ESR_BASE ;\ + /* r31: EA address of handler */ ;\ + LOAD_SYMBOL_2_GPR(r30,handler) ;\ + l.mtspr r0,r30,SPR_EPCR_BASE ;\ + l.rfe + +/* =====================================================[ exceptions] === */ + +/* ---[ 0x100: RESET exception ]----------------------------------------- */ + .org 0x100 + /* Jump to .init code at _start which lives in the .head section + * and will be discarded after boot. + */ + LOAD_SYMBOL_2_GPR(r15, _start) + tophys (r13,r15) /* MMU disabled */ + l.jr r13 + l.nop + +/* ---[ 0x200: BUS exception ]------------------------------------------- */ + .org 0x200 +_dispatch_bus_fault: + EXCEPTION_HANDLE(_bus_fault_handler) + +/* ---[ 0x300: Data Page Fault exception ]------------------------------- */ + .org 0x300 +_dispatch_do_dpage_fault: +// totaly disable timer interrupt +// l.mtspr r0,r0,SPR_TTMR +// DEBUG_TLB_PROBE(0x300) +// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300) + EXCEPTION_HANDLE(_data_page_fault_handler) + +/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ + .org 0x400 +_dispatch_do_ipage_fault: +// totaly disable timer interrupt +// l.mtspr r0,r0,SPR_TTMR +// DEBUG_TLB_PROBE(0x400) +// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400) + EXCEPTION_HANDLE(_insn_page_fault_handler) + +/* ---[ 0x500: Timer exception ]----------------------------------------- */ + .org 0x500 + EXCEPTION_HANDLE(_timer_handler) + +/* ---[ 0x600: Alignment exception ]-------------------------------------- */ + .org 0x600 + EXCEPTION_HANDLE(_alignment_handler) + +/* ---[ 0x700: Illegal insn exception ]---------------------------------- */ + .org 0x700 + EXCEPTION_HANDLE(_illegal_instruction_handler) + +/* ---[ 0x800: External interrupt exception ]---------------------------- */ + .org 0x800 + EXCEPTION_HANDLE(_external_irq_handler) + +/* ---[ 0x900: DTLB miss exception ]------------------------------------- */ + .org 0x900 + l.j boot_dtlb_miss_handler + l.nop + +/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ + .org 0xa00 + l.j boot_itlb_miss_handler + l.nop + +/* ---[ 0xb00: Range exception ]----------------------------------------- */ + .org 0xb00 + UNHANDLED_EXCEPTION(_vector_0xb00) + +/* ---[ 0xc00: Syscall exception ]--------------------------------------- */ + .org 0xc00 + EXCEPTION_HANDLE(_sys_call_handler) + +/* ---[ 0xd00: Trap exception ]------------------------------------------ */ + .org 0xd00 + UNHANDLED_EXCEPTION(_vector_0xd00) + +/* ---[ 0xe00: Trap exception ]------------------------------------------ */ + .org 0xe00 +// UNHANDLED_EXCEPTION(_vector_0xe00) + EXCEPTION_HANDLE(_trap_handler) + +/* ---[ 0xf00: Reserved exception ]-------------------------------------- */ + .org 0xf00 + UNHANDLED_EXCEPTION(_vector_0xf00) + +/* ---[ 0x1000: Reserved exception ]------------------------------------- */ + .org 0x1000 + UNHANDLED_EXCEPTION(_vector_0x1000) + +/* ---[ 0x1100: Reserved exception ]------------------------------------- */ + .org 0x1100 + UNHANDLED_EXCEPTION(_vector_0x1100) + +/* ---[ 0x1200: Reserved exception 
]------------------------------------- */ + .org 0x1200 + UNHANDLED_EXCEPTION(_vector_0x1200) + +/* ---[ 0x1300: Reserved exception ]------------------------------------- */ + .org 0x1300 + UNHANDLED_EXCEPTION(_vector_0x1300) + +/* ---[ 0x1400: Reserved exception ]------------------------------------- */ + .org 0x1400 + UNHANDLED_EXCEPTION(_vector_0x1400) + +/* ---[ 0x1500: Reserved exception ]------------------------------------- */ + .org 0x1500 + UNHANDLED_EXCEPTION(_vector_0x1500) + +/* ---[ 0x1600: Reserved exception ]------------------------------------- */ + .org 0x1600 + UNHANDLED_EXCEPTION(_vector_0x1600) + +/* ---[ 0x1700: Reserved exception ]------------------------------------- */ + .org 0x1700 + UNHANDLED_EXCEPTION(_vector_0x1700) + +/* ---[ 0x1800: Reserved exception ]------------------------------------- */ + .org 0x1800 + UNHANDLED_EXCEPTION(_vector_0x1800) + +/* ---[ 0x1900: Reserved exception ]------------------------------------- */ + .org 0x1900 + UNHANDLED_EXCEPTION(_vector_0x1900) + +/* ---[ 0x1a00: Reserved exception ]------------------------------------- */ + .org 0x1a00 + UNHANDLED_EXCEPTION(_vector_0x1a00) + +/* ---[ 0x1b00: Reserved exception ]------------------------------------- */ + .org 0x1b00 + UNHANDLED_EXCEPTION(_vector_0x1b00) + +/* ---[ 0x1c00: Reserved exception ]------------------------------------- */ + .org 0x1c00 + UNHANDLED_EXCEPTION(_vector_0x1c00) + +/* ---[ 0x1d00: Reserved exception ]------------------------------------- */ + .org 0x1d00 + UNHANDLED_EXCEPTION(_vector_0x1d00) + +/* ---[ 0x1e00: Reserved exception ]------------------------------------- */ + .org 0x1e00 + UNHANDLED_EXCEPTION(_vector_0x1e00) + +/* ---[ 0x1f00: Reserved exception ]------------------------------------- */ + .org 0x1f00 + UNHANDLED_EXCEPTION(_vector_0x1f00) + + .org 0x2000 +/* ===================================================[ kernel start ]=== */ + +/* .text*/ + +/* This early stuff belongs in HEAD, but some of the functions below definitely + * don't... */ + + __HEAD + .global _start +_start: + /* Init r0 to zero as per spec */ + CLEAR_GPR(r0) + + /* save kernel parameters */ + l.or r25,r0,r3 /* pointer to fdt */ + + /* + * ensure a deterministic start + */ + + l.ori r3,r0,0x1 + l.mtspr r0,r3,SPR_SR + + /* + * Start the TTCR as early as possible, so that the RNG can make use of + * measurements of boot time from the earliest opportunity. Especially + * important is that the TTCR does not return zero by the time we reach + * rand_initialize(). 
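+ *
+ * (SPR_TTMR_CR selects the timer's continuous-run mode, so the two
+ * instructions below simply start TTCR counting clock cycles; no
+ * period or timer interrupt is configured at this point.)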
+ */ + l.movhi r3,hi(SPR_TTMR_CR) + l.mtspr r0,r3,SPR_TTMR + + CLEAR_GPR(r1) + CLEAR_GPR(r2) + CLEAR_GPR(r3) + CLEAR_GPR(r4) + CLEAR_GPR(r5) + CLEAR_GPR(r6) + CLEAR_GPR(r7) + CLEAR_GPR(r8) + CLEAR_GPR(r9) + CLEAR_GPR(r10) + CLEAR_GPR(r11) + CLEAR_GPR(r12) + CLEAR_GPR(r13) + CLEAR_GPR(r14) + CLEAR_GPR(r15) + CLEAR_GPR(r16) + CLEAR_GPR(r17) + CLEAR_GPR(r18) + CLEAR_GPR(r19) + CLEAR_GPR(r20) + CLEAR_GPR(r21) + CLEAR_GPR(r22) + CLEAR_GPR(r23) + CLEAR_GPR(r24) + CLEAR_GPR(r26) + CLEAR_GPR(r27) + CLEAR_GPR(r28) + CLEAR_GPR(r29) + CLEAR_GPR(r30) + CLEAR_GPR(r31) + +#ifdef CONFIG_SMP + l.mfspr r26,r0,SPR_COREID + l.sfeq r26,r0 + l.bnf secondary_wait + l.nop +#endif + /* + * set up initial ksp and current + */ + /* setup kernel stack */ + LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE) + LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current + tophys (r31,r10) + l.sw TI_KSP(r31), r1 + + l.ori r4,r0,0x0 + + + /* + * .data contains initialized data, + * .bss contains uninitialized data - clear it up + */ +clear_bss: + LOAD_SYMBOL_2_GPR(r24, __bss_start) + LOAD_SYMBOL_2_GPR(r26, _end) + tophys(r28,r24) + tophys(r30,r26) + CLEAR_GPR(r24) + CLEAR_GPR(r26) +1: + l.sw (0)(r28),r0 + l.sfltu r28,r30 + l.bf 1b + l.addi r28,r28,4 + +enable_ic: + l.jal _ic_enable + l.nop + +enable_dc: + l.jal _dc_enable + l.nop + +flush_tlb: + l.jal _flush_tlb + l.nop + +/* The MMU needs to be enabled before or32_early_setup is called */ + +enable_mmu: + /* + * enable dmmu & immu + * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0 + */ + l.mfspr r30,r0,SPR_SR + l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME) + l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME) + l.or r30,r30,r28 + l.mtspr r0,r30,SPR_SR + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + + // reset the simulation counters + l.nop 5 + + /* check fdt header magic word */ + l.lwz r3,0(r25) /* load magic from fdt into r3 */ + l.movhi r4,hi(OF_DT_HEADER) + l.ori r4,r4,lo(OF_DT_HEADER) + l.sfeq r3,r4 + l.bf _fdt_found + l.nop + /* magic number mismatch, set fdt pointer to null */ + l.or r25,r0,r0 +_fdt_found: + /* pass fdt pointer to or32_early_setup in r3 */ + l.or r3,r0,r25 + LOAD_SYMBOL_2_GPR(r24, or32_early_setup) + l.jalr r24 + l.nop + +clear_regs: + /* + * clear all GPRS to increase determinism + */ + CLEAR_GPR(r2) + CLEAR_GPR(r3) + CLEAR_GPR(r4) + CLEAR_GPR(r5) + CLEAR_GPR(r6) + CLEAR_GPR(r7) + CLEAR_GPR(r8) + CLEAR_GPR(r9) + CLEAR_GPR(r11) + CLEAR_GPR(r12) + CLEAR_GPR(r13) + CLEAR_GPR(r14) + CLEAR_GPR(r15) + CLEAR_GPR(r16) + CLEAR_GPR(r17) + CLEAR_GPR(r18) + CLEAR_GPR(r19) + CLEAR_GPR(r20) + CLEAR_GPR(r21) + CLEAR_GPR(r22) + CLEAR_GPR(r23) + CLEAR_GPR(r24) + CLEAR_GPR(r25) + CLEAR_GPR(r26) + CLEAR_GPR(r27) + CLEAR_GPR(r28) + CLEAR_GPR(r29) + CLEAR_GPR(r30) + CLEAR_GPR(r31) + +jump_start_kernel: + /* + * jump to kernel entry (start_kernel) + */ + LOAD_SYMBOL_2_GPR(r30, start_kernel) + l.jr r30 + l.nop + +_flush_tlb: + /* + * I N V A L I D A T E T L B e n t r i e s + */ + LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0)) + LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0)) + l.addi r7,r0,128 /* Maximum number of sets */ +1: + l.mtspr r5,r0,0x0 + l.mtspr r6,r0,0x0 + + l.addi r5,r5,1 + l.addi r6,r6,1 + l.sfeq r7,r0 + l.bnf 1b + l.addi r7,r7,-1 + + l.jr r9 + l.nop + +#ifdef CONFIG_SMP +secondary_wait: + /* Doze the cpu until we are asked to run */ + /* If we dont have power management skip doze */ + l.mfspr r25,r0,SPR_UPR + l.andi r25,r25,SPR_UPR_PMP + l.sfeq r25,r0 + l.bf secondary_check_release + l.nop + + 
/* Setup special secondary exception handler */ + LOAD_SYMBOL_2_GPR(r3, _secondary_evbar) + tophys(r25,r3) + l.mtspr r0,r25,SPR_EVBAR + + /* Enable Interrupts */ + l.mfspr r25,r0,SPR_SR + l.ori r25,r25,SPR_SR_IEE + l.mtspr r0,r25,SPR_SR + + /* Unmask interrupts interrupts */ + l.mfspr r25,r0,SPR_PICMR + l.ori r25,r25,0xffff + l.mtspr r0,r25,SPR_PICMR + + /* Doze */ + l.mfspr r25,r0,SPR_PMR + LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME) + l.or r25,r25,r3 + l.mtspr r0,r25,SPR_PMR + + /* Wakeup - Restore exception handler */ + l.mtspr r0,r0,SPR_EVBAR + +secondary_check_release: + /* + * Check if we actually got the release signal, if not go-back to + * sleep. + */ + l.mfspr r25,r0,SPR_COREID + LOAD_SYMBOL_2_GPR(r3, secondary_release) + tophys(r4, r3) + l.lwz r3,0(r4) + l.sfeq r25,r3 + l.bnf secondary_wait + l.nop + /* fall through to secondary_init */ + +secondary_init: + /* + * set up initial ksp and current + */ + LOAD_SYMBOL_2_GPR(r10, secondary_thread_info) + tophys (r30,r10) + l.lwz r10,0(r30) + l.addi r1,r10,THREAD_SIZE + tophys (r30,r10) + l.sw TI_KSP(r30),r1 + + l.jal _ic_enable + l.nop + + l.jal _dc_enable + l.nop + + l.jal _flush_tlb + l.nop + + /* + * enable dmmu & immu + */ + l.mfspr r30,r0,SPR_SR + l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME) + l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME) + l.or r30,r30,r28 + /* + * This is a bit tricky, we need to switch over from physical addresses + * to virtual addresses on the fly. + * To do that, we first set up ESR with the IME and DME bits set. + * Then EPCR is set to secondary_start and then a l.rfe is issued to + * "jump" to that. + */ + l.mtspr r0,r30,SPR_ESR_BASE + LOAD_SYMBOL_2_GPR(r30, secondary_start) + l.mtspr r0,r30,SPR_EPCR_BASE + l.rfe + +secondary_start: + LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel) + l.jr r30 + l.nop + +#endif + +/* ========================================[ cache ]=== */ + + /* alignment here so we don't change memory offsets with + * memory controller defined + */ + .align 0x2000 + +_ic_enable: + /* Check if IC present and skip enabling otherwise */ + l.mfspr r24,r0,SPR_UPR + l.andi r26,r24,SPR_UPR_ICP + l.sfeq r26,r0 + l.bf 9f + l.nop + + /* Disable IC */ + l.mfspr r6,r0,SPR_SR + l.addi r5,r0,-1 + l.xori r5,r5,SPR_SR_ICE + l.and r5,r6,r5 + l.mtspr r0,r5,SPR_SR + + /* Establish cache block size + If BS=0, 16; + If BS=1, 32; + r14 contain block size + */ + l.mfspr r24,r0,SPR_ICCFGR + l.andi r26,r24,SPR_ICCFGR_CBS + l.srli r28,r26,7 + l.ori r30,r0,16 + l.sll r14,r30,r28 + + /* Establish number of cache sets + r16 contains number of cache sets + r28 contains log(# of cache sets) + */ + l.andi r26,r24,SPR_ICCFGR_NCS + l.srli r28,r26,3 + l.ori r30,r0,1 + l.sll r16,r30,r28 + + /* Invalidate IC */ + l.addi r6,r0,0 + l.sll r5,r14,r28 +// l.mul r5,r14,r16 +// l.trap 1 +// l.addi r5,r0,IC_SIZE +1: + l.mtspr r0,r6,SPR_ICBIR + l.sfne r6,r5 + l.bf 1b + l.add r6,r6,r14 + // l.addi r6,r6,IC_LINE + + /* Enable IC */ + l.mfspr r6,r0,SPR_SR + l.ori r6,r6,SPR_SR_ICE + l.mtspr r0,r6,SPR_SR + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop +9: + l.jr r9 + l.nop + +_dc_enable: + /* Check if DC present and skip enabling otherwise */ + l.mfspr r24,r0,SPR_UPR + l.andi r26,r24,SPR_UPR_DCP + l.sfeq r26,r0 + l.bf 9f + l.nop + + /* Disable DC */ + l.mfspr r6,r0,SPR_SR + l.addi r5,r0,-1 + l.xori r5,r5,SPR_SR_DCE + l.and r5,r6,r5 + l.mtspr r0,r5,SPR_SR + + /* Establish cache block size + If BS=0, 16; + If BS=1, 32; + r14 contain block size + */ + l.mfspr r24,r0,SPR_DCCFGR + l.andi r26,r24,SPR_DCCFGR_CBS + l.srli r28,r26,7 + 
l.ori r30,r0,16 + l.sll r14,r30,r28 + + /* Establish number of cache sets + r16 contains number of cache sets + r28 contains log(# of cache sets) + */ + l.andi r26,r24,SPR_DCCFGR_NCS + l.srli r28,r26,3 + l.ori r30,r0,1 + l.sll r16,r30,r28 + + /* Invalidate DC */ + l.addi r6,r0,0 + l.sll r5,r14,r28 +1: + l.mtspr r0,r6,SPR_DCBIR + l.sfne r6,r5 + l.bf 1b + l.add r6,r6,r14 + + /* Enable DC */ + l.mfspr r6,r0,SPR_SR + l.ori r6,r6,SPR_SR_DCE + l.mtspr r0,r6,SPR_SR +9: + l.jr r9 + l.nop + +/* ===============================================[ page table masks ]=== */ + +#define DTLB_UP_CONVERT_MASK 0x3fa +#define ITLB_UP_CONVERT_MASK 0x3a + +/* for SMP we'd have (this is a bit subtle, CC must be always set + * for SMP, but since we have _PAGE_PRESENT bit always defined + * we can just modify the mask) + */ +#define DTLB_SMP_CONVERT_MASK 0x3fb +#define ITLB_SMP_CONVERT_MASK 0x3b + +/* ---[ boot dtlb miss handler ]----------------------------------------- */ + +boot_dtlb_miss_handler: + +/* mask for DTLB_MR register: - (0) sets V (valid) bit, + * - (31-12) sets bits belonging to VPN (31-12) + */ +#define DTLB_MR_MASK 0xfffff001 + +/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit, + * - (4) sets A (access) bit, + * - (5) sets D (dirty) bit, + * - (8) sets SRE (superuser read) bit + * - (9) sets SWE (superuser write) bit + * - (31-12) sets bits belonging to VPN (31-12) + */ +#define DTLB_TR_MASK 0xfffff332 + +/* These are for masking out the VPN/PPN value from the MR/TR registers... + * it's not the same as the PFN */ +#define VPN_MASK 0xfffff000 +#define PPN_MASK 0xfffff000 + + + EXCEPTION_STORE_GPR6 + +#if 0 + l.mfspr r6,r0,SPR_ESR_BASE // + l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? + l.sfeqi r6,0 // r6 == 0x1 --> SM + l.bf exit_with_no_dtranslation // + l.nop +#endif + + /* this could be optimized by moving storing of + * non r6 registers here, and jumping r6 restore + * if not in supervisor mode + */ + + EXCEPTION_STORE_GPR2 + EXCEPTION_STORE_GPR3 + EXCEPTION_STORE_GPR4 + EXCEPTION_STORE_GPR5 + + l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA + +immediate_translation: + CLEAR_GPR(r6) + + l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb) + + l.mfspr r6, r0, SPR_DMMUCFGR + l.andi r6, r6, SPR_DMMUCFGR_NTS + l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF + l.ori r5, r0, 0x1 + l.sll r5, r5, r6 // r5 = number DMMU sets + l.addi r6, r5, -1 // r6 = nsets mask + l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK + + l.or r6,r6,r4 // r6 <- r4 + l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff + l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000 + l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK + l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry + l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR + + /* set up DTLB with no translation for EA <= 0xbfffffff */ + LOAD_SYMBOL_2_GPR(r6,0xbfffffff) + l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA) + l.bf 1f // goto out + l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1) + + tophys(r3,r4) // r3 <- PA +1: + l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff + l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000 + l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK + l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry + l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR + + EXCEPTION_LOAD_GPR6 + EXCEPTION_LOAD_GPR5 + EXCEPTION_LOAD_GPR4 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR2 + + 
l.rfe // SR <- ESR, PC <- EPC + +exit_with_no_dtranslation: + /* EA out of memory or not in supervisor mode */ + EXCEPTION_LOAD_GPR6 + EXCEPTION_LOAD_GPR4 + l.j _dispatch_bus_fault + +/* ---[ boot itlb miss handler ]----------------------------------------- */ + +boot_itlb_miss_handler: + +/* mask for ITLB_MR register: - sets V (valid) bit, + * - sets bits belonging to VPN (15-12) + */ +#define ITLB_MR_MASK 0xfffff001 + +/* mask for ITLB_TR register: - sets A (access) bit, + * - sets SXE (superuser execute) bit + * - sets bits belonging to VPN (15-12) + */ +#define ITLB_TR_MASK 0xfffff050 + +/* +#define VPN_MASK 0xffffe000 +#define PPN_MASK 0xffffe000 +*/ + + + + EXCEPTION_STORE_GPR2 + EXCEPTION_STORE_GPR3 + EXCEPTION_STORE_GPR4 + EXCEPTION_STORE_GPR5 + EXCEPTION_STORE_GPR6 + +#if 0 + l.mfspr r6,r0,SPR_ESR_BASE // + l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? + l.sfeqi r6,0 // r6 == 0x1 --> SM + l.bf exit_with_no_itranslation + l.nop +#endif + + + l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA + +earlyearly: + CLEAR_GPR(r6) + + l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb) + + l.mfspr r6, r0, SPR_IMMUCFGR + l.andi r6, r6, SPR_IMMUCFGR_NTS + l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF + l.ori r5, r0, 0x1 + l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR + l.addi r6, r5, -1 // r6 = nsets mask + l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK + + l.or r6,r6,r4 // r6 <- r4 + l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff + l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000 + l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK + l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry + l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR + + /* + * set up ITLB with no translation for EA <= 0x0fffffff + * + * we need this for head.S mapping (EA = PA). if we move all functions + * which run with mmu enabled into entry.S, we might be able to eliminate this. + * + */ + LOAD_SYMBOL_2_GPR(r6,0x0fffffff) + l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xb0ffffff >= EA) + l.bf 1f // goto out + l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1) + + tophys(r3,r4) // r3 <- PA +1: + l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff + l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000 + l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK + l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry + l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR + + EXCEPTION_LOAD_GPR6 + EXCEPTION_LOAD_GPR5 + EXCEPTION_LOAD_GPR4 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR2 + + l.rfe // SR <- ESR, PC <- EPC + +exit_with_no_itranslation: + EXCEPTION_LOAD_GPR4 + EXCEPTION_LOAD_GPR6 + l.j _dispatch_bus_fault + l.nop + +/* ====================================================================== */ +/* + * Stuff below here shouldn't go into .head section... maybe this stuff + * can be moved to entry.S ??? 
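+ *
+ * The two TLB miss handlers below walk the two-level page table by
+ * hand.  As a C sketch of what the assembly computes (assuming the
+ * 8 KiB pages and 2048-entry second level implied by the shifts):
+ *
+ *	pmd = current_pgd[ea >> 24];
+ *	if (pmd_none(pmd))
+ *		goto page_fault;
+ *	pte = ((pte_t *)(pmd & PAGE_MASK))[(ea >> 13) & 0x7ff];
+ *	if (!pte_present(pte))
+ *		goto page_fault;
+ *	set = (ea >> 13) & (nsets - 1);
+ *	DTLBTR[set] = pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
+ *	DTLBMR[set] = (ea & PAGE_MASK) | 0x1;	/* valid bit */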
+ */ + +/* ==============================================[ DTLB miss handler ]=== */ + +/* + * Comments: + * Exception handlers are entered with MMU off so the following handler + * needs to use physical addressing + * + */ + + .text +ENTRY(dtlb_miss_handler) + EXCEPTION_STORE_GPR2 + EXCEPTION_STORE_GPR3 + EXCEPTION_STORE_GPR4 + /* + * get EA of the miss + */ + l.mfspr r2,r0,SPR_EEAR_BASE + /* + * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); + */ + GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r4 is temp + l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) + l.slli r4,r4,0x2 // to get address << 2 + l.add r3,r4,r3 // r4 is pgd_index(daddr) + /* + * if (pmd_none(*pmd)) + * goto pmd_none: + */ + tophys (r4,r3) + l.lwz r3,0x0(r4) // get *pmd value + l.sfne r3,r0 + l.bnf d_pmd_none + l.addi r3,r0,0xffffe000 // PAGE_MASK + +d_pmd_good: + /* + * pte = *pte_offset(pmd, daddr); + */ + l.lwz r4,0x0(r4) // get **pmd value + l.and r4,r4,r3 // & PAGE_MASK + l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR + l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 + l.slli r3,r3,0x2 // to get address << 2 + l.add r3,r3,r4 + l.lwz r3,0x0(r3) // this is pte at last + /* + * if (!pte_present(pte)) + */ + l.andi r4,r3,0x1 + l.sfne r4,r0 // is pte present + l.bnf d_pte_not_present + l.addi r4,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK + /* + * fill DTLB TR register + */ + l.and r4,r3,r4 // apply the mask + // Determine number of DMMU sets + l.mfspr r2, r0, SPR_DMMUCFGR + l.andi r2, r2, SPR_DMMUCFGR_NTS + l.srli r2, r2, SPR_DMMUCFGR_NTS_OFF + l.ori r3, r0, 0x1 + l.sll r3, r3, r2 // r3 = number DMMU sets DMMUCFGR + l.addi r2, r3, -1 // r2 = nsets mask + l.mfspr r3, r0, SPR_EEAR_BASE + l.srli r3, r3, 0xd // >> PAGE_SHIFT + l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1) + //NUM_TLB_ENTRIES + l.mtspr r2,r4,SPR_DTLBTR_BASE(0) + /* + * fill DTLB MR register + */ + l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */ + l.ori r4,r3,0x1 // set hardware valid bit: DTBL_MR entry + l.mtspr r2,r4,SPR_DTLBMR_BASE(0) + + EXCEPTION_LOAD_GPR2 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR4 + l.rfe +d_pmd_none: +d_pte_not_present: + EXCEPTION_LOAD_GPR2 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR4 + EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler) + +/* ==============================================[ ITLB miss handler ]=== */ +ENTRY(itlb_miss_handler) + EXCEPTION_STORE_GPR2 + EXCEPTION_STORE_GPR3 + EXCEPTION_STORE_GPR4 + /* + * get EA of the miss + */ + l.mfspr r2,r0,SPR_EEAR_BASE + + /* + * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); + * + */ + GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r5 is temp + l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) + l.slli r4,r4,0x2 // to get address << 2 + l.add r3,r4,r3 // r4 is pgd_index(daddr) + /* + * if (pmd_none(*pmd)) + * goto pmd_none: + */ + tophys (r4,r3) + l.lwz r3,0x0(r4) // get *pmd value + l.sfne r3,r0 + l.bnf i_pmd_none + l.addi r3,r0,0xffffe000 // PAGE_MASK + +i_pmd_good: + /* + * pte = *pte_offset(pmd, iaddr); + * + */ + l.lwz r4,0x0(r4) // get **pmd value + l.and r4,r4,r3 // & PAGE_MASK + l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR + l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 + l.slli r3,r3,0x2 // to get address << 2 + l.add r3,r3,r4 + l.lwz r3,0x0(r3) // this is pte at last + /* + * if (!pte_present(pte)) + * + */ + l.andi r4,r3,0x1 + l.sfne r4,r0 // is pte present + l.bnf i_pte_not_present + l.addi r4,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK + /* + * fill ITLB TR register + */ + l.and r4,r3,r4 // apply the mask + l.andi r3,r3,0x7c0 
// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE + l.sfeq r3,r0 + l.bf itlb_tr_fill //_workaround + // Determine number of IMMU sets + l.mfspr r2, r0, SPR_IMMUCFGR + l.andi r2, r2, SPR_IMMUCFGR_NTS + l.srli r2, r2, SPR_IMMUCFGR_NTS_OFF + l.ori r3, r0, 0x1 + l.sll r3, r3, r2 // r3 = number IMMU sets IMMUCFGR + l.addi r2, r3, -1 // r2 = nsets mask + l.mfspr r3, r0, SPR_EEAR_BASE + l.srli r3, r3, 0xd // >> PAGE_SHIFT + l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1) + +/* + * __PHX__ :: fixme + * we should not just blindly set executable flags, + * but it does help with ping. the clean way would be to find out + * (and fix it) why stack doesn't have execution permissions + */ + +itlb_tr_fill_workaround: + l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE) +itlb_tr_fill: + l.mtspr r2,r4,SPR_ITLBTR_BASE(0) + /* + * fill DTLB MR register + */ + l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */ + l.ori r4,r3,0x1 // set hardware valid bit: ITBL_MR entry + l.mtspr r2,r4,SPR_ITLBMR_BASE(0) + + EXCEPTION_LOAD_GPR2 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR4 + l.rfe + +i_pmd_none: +i_pte_not_present: + EXCEPTION_LOAD_GPR2 + EXCEPTION_LOAD_GPR3 + EXCEPTION_LOAD_GPR4 + EXCEPTION_HANDLE(_itlb_miss_page_fault_handler) + +/* ==============================================[ boot tlb handlers ]=== */ + + +/* =================================================[ debugging aids ]=== */ + + .align 64 +_immu_trampoline: + .space 64 +_immu_trampoline_top: + +#define TRAMP_SLOT_0 (0x0) +#define TRAMP_SLOT_1 (0x4) +#define TRAMP_SLOT_2 (0x8) +#define TRAMP_SLOT_3 (0xc) +#define TRAMP_SLOT_4 (0x10) +#define TRAMP_SLOT_5 (0x14) +#define TRAMP_FRAME_SIZE (0x18) + +ENTRY(_immu_trampoline_workaround) + // r2 EEA + // r6 is physical EEA + tophys(r6,r2) + + LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) + tophys (r3,r5) // r3 is trampoline (physical) + + LOAD_SYMBOL_2_GPR(r4,0x15000000) + l.sw TRAMP_SLOT_0(r3),r4 + l.sw TRAMP_SLOT_1(r3),r4 + l.sw TRAMP_SLOT_4(r3),r4 + l.sw TRAMP_SLOT_5(r3),r4 + + // EPC = EEA - 0x4 + l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address) + l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data + l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address) + l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data + + l.srli r5,r4,26 // check opcode for write access + l.sfeqi r5,0 // l.j + l.bf 0f + l.sfeqi r5,0x11 // l.jr + l.bf 1f + l.sfeqi r5,1 // l.jal + l.bf 2f + l.sfeqi r5,0x12 // l.jalr + l.bf 3f + l.sfeqi r5,3 // l.bnf + l.bf 4f + l.sfeqi r5,4 // l.bf + l.bf 5f +99: + l.nop + l.j 99b // should never happen + l.nop 1 + + // r2 is EEA + // r3 is trampoline address (physical) + // r4 is instruction + // r6 is physical(EEA) + // + // r5 + +2: // l.jal + + /* 19 20 aa aa l.movhi r9,0xaaaa + * a9 29 bb bb l.ori r9,0xbbbb + * + * where 0xaaaabbbb is EEA + 0x4 shifted right 2 + */ + + l.addi r6,r2,0x4 // this is 0xaaaabbbb + + // l.movhi r9,0xaaaa + l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 + l.sh (TRAMP_SLOT_0+0x0)(r3),r5 + l.srli r5,r6,16 + l.sh (TRAMP_SLOT_0+0x2)(r3),r5 + + // l.ori r9,0xbbbb + l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 + l.sh (TRAMP_SLOT_1+0x0)(r3),r5 + l.andi r5,r6,0xffff + l.sh (TRAMP_SLOT_1+0x2)(r3),r5 + + /* falthrough, need to set up new jump offset */ + + +0: // l.j + l.slli r6,r4,6 // original offset shifted left 6 - 2 +// l.srli r6,r6,6 // original offset shifted right 2 + + l.slli r4,r2,4 // old jump position: EEA shifted left 4 +// l.srli r4,r4,6 // old jump position: shifted right 2 + + l.addi r5,r3,0xc // new jump position (physical) + 
l.slli r5,r5,4 // new jump position: shifted left 4 + + // calculate new jump offset + // new_off = old_off + (old_jump - new_jump) + + l.sub r5,r4,r5 // old_jump - new_jump + l.add r5,r6,r5 // orig_off + (old_jump - new_jump) + l.srli r5,r5,6 // new offset shifted right 2 + + // r5 is new jump offset + // l.j has opcode 0x0... + l.sw TRAMP_SLOT_2(r3),r5 // write it back + + l.j trampoline_out + l.nop + +/* ----------------------------- */ + +3: // l.jalr + + /* 19 20 aa aa l.movhi r9,0xaaaa + * a9 29 bb bb l.ori r9,0xbbbb + * + * where 0xaaaabbbb is EEA + 0x4 shifted right 2 + */ + + l.addi r6,r2,0x4 // this is 0xaaaabbbb + + // l.movhi r9,0xaaaa + l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 + l.sh (TRAMP_SLOT_0+0x0)(r3),r5 + l.srli r5,r6,16 + l.sh (TRAMP_SLOT_0+0x2)(r3),r5 + + // l.ori r9,0xbbbb + l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 + l.sh (TRAMP_SLOT_1+0x0)(r3),r5 + l.andi r5,r6,0xffff + l.sh (TRAMP_SLOT_1+0x2)(r3),r5 + + l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction + l.andi r5,r5,0x3ff // clear out opcode part + l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr + l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back + + /* falthrough */ + +1: // l.jr + l.j trampoline_out + l.nop + +/* ----------------------------- */ + +4: // l.bnf +5: // l.bf + l.slli r6,r4,6 // original offset shifted left 6 - 2 +// l.srli r6,r6,6 // original offset shifted right 2 + + l.slli r4,r2,4 // old jump position: EEA shifted left 4 +// l.srli r4,r4,6 // old jump position: shifted right 2 + + l.addi r5,r3,0xc // new jump position (physical) + l.slli r5,r5,4 // new jump position: shifted left 4 + + // calculate new jump offset + // new_off = old_off + (old_jump - new_jump) + + l.add r6,r6,r4 // (orig_off + old_jump) + l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump + l.srli r6,r6,6 // new offset shifted right 2 + + // r6 is new jump offset + l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction + l.srli r4,r4,16 + l.andi r4,r4,0xfc00 // get opcode part + l.slli r4,r4,16 + l.or r6,r4,r6 // l.b(n)f new offset + l.sw TRAMP_SLOT_2(r3),r6 // write it back + + /* we need to add l.j to EEA + 0x8 */ + tophys (r4,r2) // may not be needed (due to shifts down_ + l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8) + // jump position = r5 + 0x8 (0x8 compensated) + l.sub r4,r4,r5 // jump offset = target - new_position + 0x8 + + l.slli r4,r4,4 // the amount of info in imediate of jump + l.srli r4,r4,6 // jump instruction with offset + l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot + + /* fallthrough */ + +trampoline_out: + // set up new EPC to point to our trampoline code + LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) + l.mtspr r0,r5,SPR_EPCR_BASE + + // immu_trampoline is (4x) CACHE_LINE aligned + // and only 6 instructions long, + // so we need to invalidate only 2 lines + + /* Establish cache block size + If BS=0, 16; + If BS=1, 32; + r14 contain block size + */ + l.mfspr r21,r0,SPR_ICCFGR + l.andi r21,r21,SPR_ICCFGR_CBS + l.srli r21,r21,7 + l.ori r23,r0,16 + l.sll r14,r23,r21 + + l.mtspr r0,r5,SPR_ICBIR + l.add r5,r5,r14 + l.mtspr r0,r5,SPR_ICBIR + + l.jr r9 + l.nop + + +/* + * DSCR: prints a string referenced by r3. 
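+ *
+ * A typical call site (see UNHANDLED_EXCEPTION above) loads the
+ * argument in the delay slot; note that only lo() of the string's
+ * address is used there, so the string must live low in memory:
+ *
+ *	l.jal	_emergency_print
+ *	l.ori	r3,r0,lo(_string_nl)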
+ * + * PRMS: r3 - address of the first character of null + * terminated string to be printed + * + * PREQ: UART at UART_BASE_ADD has to be initialized + * + * POST: caller should be aware that r3, r9 are changed + */ +ENTRY(_emergency_print) + EMERGENCY_PRINT_STORE_GPR4 + EMERGENCY_PRINT_STORE_GPR5 + EMERGENCY_PRINT_STORE_GPR6 + EMERGENCY_PRINT_STORE_GPR7 +2: + l.lbz r7,0(r3) + l.sfeq r7,r0 + l.bf 9f + l.nop + +// putc: + l.movhi r4,hi(UART_BASE_ADD) + + l.addi r6,r0,0x20 +1: l.lbz r5,5(r4) + l.andi r5,r5,0x20 + l.sfeq r5,r6 + l.bnf 1b + l.nop + + l.sb 0(r4),r7 + + l.addi r6,r0,0x60 +1: l.lbz r5,5(r4) + l.andi r5,r5,0x60 + l.sfeq r5,r6 + l.bnf 1b + l.nop + + /* next character */ + l.j 2b + l.addi r3,r3,0x1 + +9: + EMERGENCY_PRINT_LOAD_GPR7 + EMERGENCY_PRINT_LOAD_GPR6 + EMERGENCY_PRINT_LOAD_GPR5 + EMERGENCY_PRINT_LOAD_GPR4 + l.jr r9 + l.nop + +ENTRY(_emergency_print_nr) + EMERGENCY_PRINT_STORE_GPR4 + EMERGENCY_PRINT_STORE_GPR5 + EMERGENCY_PRINT_STORE_GPR6 + EMERGENCY_PRINT_STORE_GPR7 + EMERGENCY_PRINT_STORE_GPR8 + + l.addi r8,r0,32 // shift register + +1: /* remove leading zeros */ + l.addi r8,r8,-0x4 + l.srl r7,r3,r8 + l.andi r7,r7,0xf + + /* don't skip the last zero if number == 0x0 */ + l.sfeqi r8,0x4 + l.bf 2f + l.nop + + l.sfeq r7,r0 + l.bf 1b + l.nop + +2: + l.srl r7,r3,r8 + + l.andi r7,r7,0xf + l.sflts r8,r0 + l.bf 9f + + l.sfgtui r7,0x9 + l.bnf 8f + l.nop + l.addi r7,r7,0x27 + +8: + l.addi r7,r7,0x30 +// putc: + l.movhi r4,hi(UART_BASE_ADD) + + l.addi r6,r0,0x20 +1: l.lbz r5,5(r4) + l.andi r5,r5,0x20 + l.sfeq r5,r6 + l.bnf 1b + l.nop + + l.sb 0(r4),r7 + + l.addi r6,r0,0x60 +1: l.lbz r5,5(r4) + l.andi r5,r5,0x60 + l.sfeq r5,r6 + l.bnf 1b + l.nop + + /* next character */ + l.j 2b + l.addi r8,r8,-0x4 + +9: + EMERGENCY_PRINT_LOAD_GPR8 + EMERGENCY_PRINT_LOAD_GPR7 + EMERGENCY_PRINT_LOAD_GPR6 + EMERGENCY_PRINT_LOAD_GPR5 + EMERGENCY_PRINT_LOAD_GPR4 + l.jr r9 + l.nop + + +/* + * This should be used for debugging only. + * It messes up the Linux early serial output + * somehow, so use it sparingly and essentially + * only if you need to debug something that goes wrong + * before Linux gets the early serial going. + * + * Furthermore, you'll have to make sure you set the + * UART_DEVISOR correctly according to the system + * clock rate. 
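+ *
+ * As a worked example with the defaults below: SYS_CLK = 20000000
+ * and OR32_CONSOLE_BAUD = 115200 give
+ * UART_DIVISOR = 20000000 / (16 * 115200) = 10 (integer division).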
+ * + * + */ + + + +#define SYS_CLK 20000000 +//#define SYS_CLK 1843200 +#define OR32_CONSOLE_BAUD 115200 +#define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD) + +ENTRY(_early_uart_init) + l.movhi r3,hi(UART_BASE_ADD) + + l.addi r4,r0,0x7 + l.sb 0x2(r3),r4 + + l.addi r4,r0,0x0 + l.sb 0x1(r3),r4 + + l.addi r4,r0,0x3 + l.sb 0x3(r3),r4 + + l.lbz r5,3(r3) + l.ori r4,r5,0x80 + l.sb 0x3(r3),r4 + l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff) + l.sb UART_DLM(r3),r4 + l.addi r4,r0,((UART_DIVISOR) & 0x000000ff) + l.sb UART_DLL(r3),r4 + l.sb 0x3(r3),r5 + + l.jr r9 + l.nop + + .align 0x1000 + .global _secondary_evbar +_secondary_evbar: + + .space 0x800 + /* Just disable interrupts and Return */ + l.ori r3,r0,SPR_SR_SM + l.mtspr r0,r3,SPR_ESR_BASE + l.rfe + + + .section .rodata +_string_unhandled_exception: + .string "\n\rRunarunaround: Unhandled exception 0x\0" + +_string_epc_prefix: + .string ": EPC=0x\0" + +_string_nl: + .string "\n\r\0" + + +/* ========================================[ page aligned structures ]=== */ + +/* + * .data section should be page aligned + * (look into arch/openrisc/kernel/vmlinux.lds.S) + */ + .section .data,"aw" + .align 8192 + .global empty_zero_page +empty_zero_page: + .space 8192 + + .global swapper_pg_dir +swapper_pg_dir: + .space 8192 + + .global _unhandled_stack +_unhandled_stack: + .space 8192 +_unhandled_stack_top: + +/* ============================================================[ EOF ]=== */ diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c new file mode 100644 index 000000000..5f9445eff --- /dev/null +++ b/arch/openrisc/kernel/irq.c @@ -0,0 +1,47 @@ +/* + * OpenRISC irq.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/ftrace.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/export.h> +#include <linux/irqflags.h> + +/* read interrupt enabled status */ +unsigned long arch_local_save_flags(void) +{ + return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE); +} +EXPORT_SYMBOL(arch_local_save_flags); + +/* set interrupt enabled status */ +void arch_local_irq_restore(unsigned long flags) +{ + mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags)); +} +EXPORT_SYMBOL(arch_local_irq_restore); + +void __init init_IRQ(void) +{ + irqchip_init(); +} + +void __irq_entry do_IRQ(struct pt_regs *regs) +{ + handle_arch_irq(regs); +} diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c new file mode 100644 index 000000000..ef872ae4c --- /dev/null +++ b/arch/openrisc/kernel/module.c @@ -0,0 +1,70 @@ +/* + * OpenRISC module.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
+ * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/moduleloader.h> +#include <linux/elf.h> + +int apply_relocate_add(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + uint32_t value; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + value = sym->st_value + rel[i].r_addend; + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_OR32_32: + *location = value; + break; + case R_OR32_CONST: + *((uint16_t *)location + 1) = value; + break; + case R_OR32_CONSTH: + *((uint16_t *)location + 1) = value >> 16; + break; + case R_OR32_JUMPTARG: + value -= (uint32_t)location; + value >>= 2; + value &= 0x03ffffff; + value |= *location & 0xfc000000; + *location = value; + break; + default: + pr_err("module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + break; + } + } + + return 0; +} diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c new file mode 100644 index 000000000..d7260fdb0 --- /dev/null +++ b/arch/openrisc/kernel/or32_ksyms.c @@ -0,0 +1,51 @@ +/* + * OpenRISC or32_ksyms.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/export.h> +#include <linux/elfcore.h> +#include <linux/sched.h> +#include <linux/in6.h> +#include <linux/interrupt.h> +#include <linux/vmalloc.h> +#include <linux/semaphore.h> + +#include <asm/processor.h> +#include <linux/uaccess.h> +#include <asm/checksum.h> +#include <asm/io.h> +#include <asm/hardirq.h> +#include <asm/delay.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> + +#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) + +/* compiler generated symbols */ +DECLARE_EXPORT(__udivsi3); +DECLARE_EXPORT(__divsi3); +DECLARE_EXPORT(__umodsi3); +DECLARE_EXPORT(__modsi3); +DECLARE_EXPORT(__muldi3); +DECLARE_EXPORT(__ashrdi3); +DECLARE_EXPORT(__ashldi3); +DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__ucmpdi2); + +EXPORT_SYMBOL(empty_zero_page); +EXPORT_SYMBOL(__copy_tofrom_user); +EXPORT_SYMBOL(__clear_user); +EXPORT_SYMBOL(memset); diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c new file mode 100644 index 000000000..8739b6d75 --- /dev/null +++ b/arch/openrisc/kernel/process.c @@ -0,0 +1,284 @@ +/* + * OpenRISC process.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This file handles the architecture-dependent parts of process handling... + */ + +#define __KERNEL_SYSCALLS__ +#include <stdarg.h> + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/elfcore.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/init_task.h> +#include <linux/mqueue.h> +#include <linux/fs.h> + +#include <linux/uaccess.h> +#include <asm/pgtable.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <asm/spr_defs.h> + +#include <linux/smp.h> + +/* + * Pointer to Current thread info structure. + * + * Used at user space -> kernel transitions. + */ +struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, }; + +void machine_restart(void) +{ + printk(KERN_INFO "*** MACHINE RESTART ***\n"); + __asm__("l.nop 1"); +} + +/* + * Similar to machine_power_off, but don't shut off power. Add code + * here to freeze the system for e.g. post-mortem debug purpose when + * possible. This halt has nothing to do with the idle halt. + */ +void machine_halt(void) +{ + printk(KERN_INFO "*** MACHINE HALT ***\n"); + __asm__("l.nop 1"); +} + +/* If or when software power-off is implemented, add code here. */ +void machine_power_off(void) +{ + printk(KERN_INFO "*** MACHINE POWER OFF ***\n"); + __asm__("l.nop 1"); +} + +/* + * Send the doze signal to the cpu if available. 
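+ * Doze is only available when the power management unit is present:
+ * SPR_UPR_PMP advertises the unit, and setting SPR_PMR_DME in the PMR
+ * asks the core to doze until the next interrupt.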
+ * Make sure, that all interrupts are enabled + */ +void arch_cpu_idle(void) +{ + local_irq_enable(); + if (mfspr(SPR_UPR) & SPR_UPR_PMP) + mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); +} + +void (*pm_power_off) (void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); + +/* + * When a process does an "exec", machine state like FPU and debug + * registers need to be reset. This is a hook function for that. + * Currently we don't have any such state to reset, so this is empty. + */ +void flush_thread(void) +{ +} + +void show_regs(struct pt_regs *regs) +{ + extern void show_registers(struct pt_regs *regs); + + show_regs_print_info(KERN_DEFAULT); + /* __PHX__ cleanup this mess */ + show_registers(regs); +} + +void release_thread(struct task_struct *dead_task) +{ +} + +/* + * Copy the thread-specific (arch specific) info from the current + * process to the new one p + */ +extern asmlinkage void ret_from_fork(void); + +/* + * copy_thread + * @clone_flags: flags + * @usp: user stack pointer or fn for kernel thread + * @arg: arg to fn for kernel thread; always NULL for userspace thread + * @p: the newly created task + * @regs: CPU context to copy for userspace thread; always NULL for kthread + * + * At the top of a newly initialized kernel stack are two stacked pt_reg + * structures. The first (topmost) is the userspace context of the thread. + * The second is the kernelspace context of the thread. + * + * A kernel thread will not be returning to userspace, so the topmost pt_regs + * struct can be uninitialized; it _does_ need to exist, though, because + * a kernel thread can become a userspace thread by doing a kernel_execve, in + * which case the topmost context will be initialized and used for 'returning' + * to userspace. + * + * The second pt_reg struct needs to be initialized to 'return' to + * ret_from_fork. A kernel thread will need to set r20 to the address of + * a function to call into (with arg in r22); userspace threads need to set + * r20 to NULL in which case ret_from_fork will just continue a return to + * userspace. + * + * A kernel thread 'fn' may return; this is effectively what happens when + * kernel_execve is called. In that case, the userspace pt_regs must have + * been initialized (which kernel_execve takes care of, see start_thread + * below); ret_from_fork will then continue its execution causing the + * 'kernel thread' to return to userspace as a userspace thread. + */ + +int +copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p) +{ + struct pt_regs *userregs; + struct pt_regs *kregs; + unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; + unsigned long top_of_kernel_stack; + + top_of_kernel_stack = sp; + + /* Locate userspace context on stack... */ + sp -= STACK_FRAME_OVERHEAD; /* redzone */ + sp -= sizeof(struct pt_regs); + userregs = (struct pt_regs *) sp; + + /* ...and kernel context */ + sp -= STACK_FRAME_OVERHEAD; /* redzone */ + sp -= sizeof(struct pt_regs); + kregs = (struct pt_regs *)sp; + + if (unlikely(p->flags & PF_KTHREAD)) { + memset(kregs, 0, sizeof(struct pt_regs)); + kregs->gpr[20] = usp; /* fn, kernel thread */ + kregs->gpr[22] = arg; + } else { + *userregs = *current_pt_regs(); + + if (usp) + userregs->sp = usp; + + /* + * For CLONE_SETTLS set "tp" (r10) to the TLS pointer passed to sys_clone. + * + * The kernel entry is: + * int clone (long flags, void *child_stack, int *parent_tid, + * int *child_tid, struct void *tls) + * + * This makes the source r7 in the kernel registers. 
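+ *
+ * With the OpenRISC calling convention the five arguments arrive in
+ * r3..r7, so the mapping is (sketch, per the ABI):
+ *
+ *	r3 = flags, r4 = child_stack, r5 = parent_tid,
+ *	r6 = child_tid, r7 = tls
+ *
+ * which is why the TLS pointer is read back out of gpr[7] just below.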
+ */ + if (clone_flags & CLONE_SETTLS) + userregs->gpr[10] = userregs->gpr[7]; + + userregs->gpr[11] = 0; /* Result from fork() */ + + kregs->gpr[20] = 0; /* Userspace thread */ + } + + /* + * _switch wants the kernel stack page in pt_regs->sp so that it + * can restore it to thread_info->ksp... see _switch for details. + */ + kregs->sp = top_of_kernel_stack; + kregs->gpr[9] = (unsigned long)ret_from_fork; + + task_thread_info(p)->ksp = (unsigned long)kregs; + + return 0; +} + +/* + * Set up a thread for executing a new program + */ +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM; + + memset(regs, 0, sizeof(struct pt_regs)); + + regs->pc = pc; + regs->sr = sr; + regs->sp = sp; +} + +/* Fill in the fpu structure for a core dump. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) +{ + /* TODO */ + return 0; +} + +extern struct thread_info *_switch(struct thread_info *old_ti, + struct thread_info *new_ti); +extern int lwa_flag; + +struct task_struct *__switch_to(struct task_struct *old, + struct task_struct *new) +{ + struct task_struct *last; + struct thread_info *new_ti, *old_ti; + unsigned long flags; + + local_irq_save(flags); + + /* current_set is an array of saved current pointers + * (one for each cpu). we need them at user->kernel transition, + * while we save them at kernel->user transition + */ + new_ti = new->stack; + old_ti = old->stack; + + lwa_flag = 0; + + current_thread_info_set[smp_processor_id()] = new_ti; + last = (_switch(old_ti, new_ti))->task; + + local_irq_restore(flags); + + return last; +} + +/* + * Write out registers in core dump format, as defined by the + * struct user_regs_struct + */ +void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs) +{ + dest[0] = 0; /* r0 */ + memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long)); + dest[32] = regs->pc; + dest[33] = regs->sr; + dest[34] = 0; + dest[35] = 0; +} + +unsigned long get_wchan(struct task_struct *p) +{ + /* TODO */ + + return 0; +} diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c new file mode 100644 index 000000000..6a44340d1 --- /dev/null +++ b/arch/openrisc/kernel/prom.c @@ -0,0 +1,32 @@ +/* + * OpenRISC prom.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Architecture specific procedures for creating, accessing and + * interpreting the device tree. + * + */ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/memblock.h> +#include <linux/of_fdt.h> + +#include <asm/page.h> + +void __init early_init_devtree(void *params) +{ + early_init_dt_scan(params); + memblock_allow_resize(); +} diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c new file mode 100644 index 000000000..eb97a8e7c --- /dev/null +++ b/arch/openrisc/kernel/ptrace.c @@ -0,0 +1,205 @@ +/* + * OpenRISC ptrace.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
+ * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/string.h> + +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/audit.h> +#include <linux/regset.h> +#include <linux/tracehook.h> +#include <linux/elf.h> + +#include <asm/thread_info.h> +#include <asm/segment.h> +#include <asm/page.h> +#include <asm/pgtable.h> + +/* + * Copy the thread state to a regset that can be interpreted by userspace. + * + * It doesn't matter what our internal pt_regs structure looks like. The + * important thing is that we export a consistent view of the thread state + * to userspace. As such, we need to make sure that the regset remains + * ABI compatible as defined by the struct user_regs_struct: + * + * (Each item is a 32-bit word) + * r0 = 0 (exported for clarity) + * 31 GPRS r1-r31 + * PC (Program counter) + * SR (Supervision register) + */ +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user * ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* r0 */ + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4); + + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->gpr+1, 4, 4*32); + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->pc, 4*32, 4*33); + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->sr, 4*33, 4*34); + if (!ret) + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + 4*34, -1); + + return ret; +} + +/* + * Set the thread state from a regset passed in via ptrace + */ +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user * ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* ignore r0 */ + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); + /* r1 - r31 */ + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->gpr+1, 4, 4*32); + /* PC */ + if (!ret) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->pc, 4*32, 4*33); + /* + * Skip SR and padding... 
userspace isn't allowed to changes bits in + * the Supervision register + */ + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + 4*33, -1); + + return ret; +} + +/* + * Define the register sets available on OpenRISC under Linux + */ +enum or1k_regset { + REGSET_GENERAL, +}; + +static const struct user_regset or1k_regsets[] = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(long), + .align = sizeof(long), + .get = genregs_get, + .set = genregs_set, + }, +}; + +static const struct user_regset_view user_or1k_native_view = { + .name = "or1k", + .e_machine = EM_OPENRISC, + .regsets = or1k_regsets, + .n = ARRAY_SIZE(or1k_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_or1k_native_view; +} + +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + pr_debug("ptrace_disable(): TODO\n"); + + user_disable_single_step(child); + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); +} + +long arch_ptrace(struct task_struct *child, long request, unsigned long addr, + unsigned long data) +{ + int ret; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) +{ + long ret = 0; + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + /* + * Tracing decided this syscall should not happen. + * We'll return a bogus call number to get an ENOSYS + * error, but leave the original number in <something>. + */ + ret = -1L; + + audit_syscall_entry(regs->gpr[11], regs->gpr[3], regs->gpr[4], + regs->gpr[5], regs->gpr[6]); + + return ret ? : regs->gpr[11]; +} + +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) +{ + int step; + + audit_syscall_exit(regs); + + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, step); +} diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c new file mode 100644 index 000000000..f3a7375ac --- /dev/null +++ b/arch/openrisc/kernel/setup.c @@ -0,0 +1,415 @@ +/* + * OpenRISC setup.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * This file handles the architecture-dependent parts of initialization + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/tty.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/seq_file.h> +#include <linux/serial.h> +#include <linux/initrd.h> +#include <linux/of_fdt.h> +#include <linux/of.h> +#include <linux/memblock.h> +#include <linux/device.h> + +#include <asm/sections.h> +#include <asm/segment.h> +#include <asm/pgtable.h> +#include <asm/types.h> +#include <asm/setup.h> +#include <asm/io.h> +#include <asm/cpuinfo.h> +#include <asm/delay.h> + +#include "vmlinux.h" + +static void __init setup_memory(void) +{ + unsigned long ram_start_pfn; + unsigned long ram_end_pfn; + phys_addr_t memory_start, memory_end; + struct memblock_region *region; + + memory_end = memory_start = 0; + + /* Find main memory where is the kernel, we assume its the only one */ + for_each_memblock(memory, region) { + memory_start = region->base; + memory_end = region->base + region->size; + printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, + memory_start, memory_end); + } + + if (!memory_end) { + panic("No memory!"); + } + + ram_start_pfn = PFN_UP(memory_start); + ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + /* setup bootmem globals (we use no_bootmem, but mm still depends on this) */ + min_low_pfn = ram_start_pfn; + max_low_pfn = ram_end_pfn; + max_pfn = ram_end_pfn; + + /* + * initialize the boot-time allocator (with low memory only). + * + * This makes the memory from the end of the kernel to the end of + * RAM usable. + */ + memblock_reserve(__pa(_stext), _end - _stext); + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + + memblock_dump_all(); +} + +struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; + +static void print_cpuinfo(void) +{ + unsigned long upr = mfspr(SPR_UPR); + unsigned long vr = mfspr(SPR_VR); + unsigned int version; + unsigned int revision; + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + + version = (vr & SPR_VR_VER) >> 24; + revision = (vr & SPR_VR_REV); + + printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n", + version, revision, cpuinfo->clock_frequency / 1000000); + + if (!(upr & SPR_UPR_UP)) { + printk(KERN_INFO + "-- no UPR register... 
unable to detect configuration\n"); + return; + } + + if (upr & SPR_UPR_DCP) + printk(KERN_INFO + "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", + cpuinfo->dcache_size, cpuinfo->dcache_block_size, + cpuinfo->dcache_ways); + else + printk(KERN_INFO "-- dcache disabled\n"); + if (upr & SPR_UPR_ICP) + printk(KERN_INFO + "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", + cpuinfo->icache_size, cpuinfo->icache_block_size, + cpuinfo->icache_ways); + else + printk(KERN_INFO "-- icache disabled\n"); + + if (upr & SPR_UPR_DMP) + printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n", + 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), + 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)); + if (upr & SPR_UPR_IMP) + printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n", + 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), + 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW)); + + printk(KERN_INFO "-- additional features:\n"); + if (upr & SPR_UPR_DUP) + printk(KERN_INFO "-- debug unit\n"); + if (upr & SPR_UPR_PCUP) + printk(KERN_INFO "-- performance counters\n"); + if (upr & SPR_UPR_PMP) + printk(KERN_INFO "-- power management\n"); + if (upr & SPR_UPR_PICP) + printk(KERN_INFO "-- PIC\n"); + if (upr & SPR_UPR_TTP) + printk(KERN_INFO "-- timer\n"); + if (upr & SPR_UPR_CUP) + printk(KERN_INFO "-- custom unit(s)\n"); +} + +static struct device_node *setup_find_cpu_node(int cpu) +{ + u32 hwid; + struct device_node *cpun; + struct device_node *cpus = of_find_node_by_path("/cpus"); + + for_each_available_child_of_node(cpus, cpun) { + if (of_property_read_u32(cpun, "reg", &hwid)) + continue; + if (hwid == cpu) + return cpun; + } + + return NULL; +} + +void __init setup_cpuinfo(void) +{ + struct device_node *cpu; + unsigned long iccfgr, dccfgr; + unsigned long cache_set_size; + int cpu_id = smp_processor_id(); + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id]; + + cpu = setup_find_cpu_node(cpu_id); + if (!cpu) + panic("Couldn't find CPU%d in device tree...\n", cpu_id); + + iccfgr = mfspr(SPR_ICCFGR); + cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); + cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); + cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); + cpuinfo->icache_size = + cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size; + + dccfgr = mfspr(SPR_DCCFGR); + cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); + cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); + cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); + cpuinfo->dcache_size = + cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size; + + if (of_property_read_u32(cpu, "clock-frequency", + &cpuinfo->clock_frequency)) { + printk(KERN_WARNING + "Device tree missing CPU 'clock-frequency' parameter." + "Assuming frequency 25MHZ" + "This is probably not what you want."); + } + + cpuinfo->coreid = mfspr(SPR_COREID); + + of_node_put(cpu); + + print_cpuinfo(); +} + +/** + * or32_early_setup + * + * Handles the pointer to the device tree that this kernel is to use + * for establishing the available platform devices. + * + * Falls back on built-in device tree in case null pointer is passed. 
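+ *
+ * A sketch of the resulting early boot flow (head.S hands the
+ * bootloader-provided register r3 in here as @fdt):
+ *
+ *	or32_early_setup(fdt)
+ *	    -> early_init_devtree(fdt or __dtb_start)
+ *	           -> early_init_dt_scan(), memblock_allow_resize()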
+ */ + +void __init or32_early_setup(void *fdt) +{ + if (fdt) + pr_info("FDT at %p\n", fdt); + else { + fdt = __dtb_start; + pr_info("Compiled-in FDT at %p\n", fdt); + } + early_init_devtree(fdt); +} + +static inline unsigned long extract_value_bits(unsigned long reg, + short bit_nr, short width) +{ + return (reg >> bit_nr) & (0 << width); +} + +static inline unsigned long extract_value(unsigned long reg, unsigned long mask) +{ + while (!(mask & 0x1)) { + reg = reg >> 1; + mask = mask >> 1; + } + return mask & reg; +} + +void __init detect_unit_config(unsigned long upr, unsigned long mask, + char *text, void (*func) (void)) +{ + if (text != NULL) + printk("%s", text); + + if (upr & mask) { + if (func != NULL) + func(); + else + printk("present\n"); + } else + printk("not present\n"); +} + +/* + * calibrate_delay + * + * Lightweight calibrate_delay implementation that calculates loops_per_jiffy + * from the clock frequency passed in via the device tree + * + */ + +void calibrate_delay(void) +{ + const int *val; + struct device_node *cpu = setup_find_cpu_node(smp_processor_id()); + + val = of_get_property(cpu, "clock-frequency", NULL); + if (!val) + panic("no cpu 'clock-frequency' parameter in device tree"); + loops_per_jiffy = *val / HZ; + pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); + + of_node_put(cpu); +} + +void __init setup_arch(char **cmdline_p) +{ + unflatten_and_copy_device_tree(); + + setup_cpuinfo(); + +#ifdef CONFIG_SMP + smp_init_cpus(); +#endif + + /* process 1's initial memory region is the kernel code/data */ + init_mm.start_code = (unsigned long)_stext; + init_mm.end_code = (unsigned long)_etext; + init_mm.end_data = (unsigned long)_edata; + init_mm.brk = (unsigned long)_end; + +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = (unsigned long)&__initrd_start; + initrd_end = (unsigned long)&__initrd_end; + if (initrd_start == initrd_end) { + initrd_start = 0; + initrd_end = 0; + } + initrd_below_start_ok = 1; +#endif + + /* setup memblock allocator */ + setup_memory(); + + /* paging_init() sets up the MMU and marks all pages as reserved */ + paging_init(); + +#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) + if (!conswitchp) + conswitchp = &dummy_con; +#endif + + *cmdline_p = boot_command_line; + + printk(KERN_INFO "OpenRISC Linux -- http://openrisc.io\n"); +} + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned int vr, cpucfgr; + unsigned int avr; + unsigned int version; + struct cpuinfo_or1k *cpuinfo = v; + + vr = mfspr(SPR_VR); + cpucfgr = mfspr(SPR_CPUCFGR); + +#ifdef CONFIG_SMP + seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid); +#endif + if (vr & SPR_VR_UVRP) { + vr = mfspr(SPR_VR2); + version = vr & SPR_VR2_VER; + avr = mfspr(SPR_AVR); + seq_printf(m, "cpu architecture\t: " + "OpenRISC 1000 (%d.%d-rev%d)\n", + (avr >> 24) & 0xff, + (avr >> 16) & 0xff, + (avr >> 8) & 0xff); + seq_printf(m, "cpu implementation id\t: 0x%x\n", + (vr & SPR_VR2_CPUID) >> 24); + seq_printf(m, "cpu version\t\t: 0x%x\n", version); + } else { + version = (vr & SPR_VR_VER) >> 24; + seq_printf(m, "cpu\t\t\t: OpenRISC-%x\n", version); + seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV); + } + seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ); + seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size); + seq_printf(m, "dcache block size\t: %d bytes\n", + cpuinfo->dcache_block_size); + seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways); + seq_printf(m, 
"icache size\t\t: %d bytes\n", cpuinfo->icache_size); + seq_printf(m, "icache block size\t: %d bytes\n", + cpuinfo->icache_block_size); + seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways); + seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n", + 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), + 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)); + seq_printf(m, "dmmu\t\t\t: %d entries, %lu ways\n", + 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), + 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW)); + seq_printf(m, "bogomips\t\t: %lu.%02lu\n", + (loops_per_jiffy * HZ) / 500000, + ((loops_per_jiffy * HZ) / 5000) % 100); + + seq_puts(m, "features\t\t: "); + seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB32S ? "orbis32" : ""); + seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB64S ? "orbis64" : ""); + seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF32S ? "orfpx32" : ""); + seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF64S ? "orfpx64" : ""); + seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OV64S ? "orvdx64" : ""); + seq_puts(m, "\n"); + + seq_puts(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + *pos = cpumask_next(*pos - 1, cpu_online_mask); + if ((*pos) < nr_cpu_ids) + return &cpuinfo_or1k[*pos]; + return NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + (*pos)++; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c new file mode 100644 index 000000000..265f10fb3 --- /dev/null +++ b/arch/openrisc/kernel/signal.c @@ -0,0 +1,326 @@ +/* + * OpenRISC signal.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/stddef.h> +#include <linux/tracehook.h> + +#include <asm/processor.h> +#include <asm/syscall.h> +#include <asm/ucontext.h> +#include <linux/uaccess.h> + +#define DEBUG_SIG 0 + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; + unsigned char retcode[16]; /* trampoline code */ +}; + +static int restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) +{ + int err = 0; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* + * Restore the regs from &sc->regs. 
+ * (sc is already checked for VERIFY_READ since the sigframe was + * checked in sys_sigreturn previously) + */ + err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); + err |= __copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long)); + err |= __copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long)); + + /* make sure the SM-bit is cleared so user-mode cannot fool us */ + regs->sr &= ~SPR_SR_SM; + + regs->orig_gpr11 = -1; /* Avoid syscall restart checks */ + + /* TODO: the other ports use regs->orig_XX to disable syscall checks + * after this completes, but we don't use that mechanism. maybe we can + * use it now ? + */ + + return err; +} + +asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; + sigset_t set; + + /* + * Since we stacked the signal on a dword boundary, + * then frame should be dword aligned here. If it's + * not, then the user is trying to mess with us. + */ + if (((long)frame) & 3) + goto badframe; + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->gpr[11]; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +/* + * Set up a signal frame. + */ + +static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + + /* copy the regs */ + /* There should be no need to save callee-saved registers here... + * ...but we save them anyway. Revisit this + */ + err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); + err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long)); + err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long)); + + return err; +} + +static inline unsigned long align_sigframe(unsigned long sp) +{ + return sp & ~3UL; +} + +/* + * Work out where the signal frame should go. It's either on the user stack + * or the alternate stack. + */ + +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, size_t frame_size) +{ + unsigned long sp = regs->sp; + + /* redzone */ + sp -= STACK_FRAME_OVERHEAD; + sp = sigsp(sp, ksig); + sp = align_sigframe(sp - frame_size); + + return (void __user *)sp; +} + +/* grab and setup a signal frame. + * + * basically we stack a lot of state info, and arrange for the + * user-mode program to return to the kernel using either a + * trampoline which performs the syscall sigreturn, or a provided + * user-mode trampoline. + */ +static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct rt_sigframe *frame; + unsigned long return_ip; + int err = 0; + + frame = get_sigframe(ksig, regs, sizeof(*frame)); + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + return -EFAULT; + + /* Create siginfo. */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. 
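+ *
+ * For reference, the retcode trampoline stored below decodes as
+ * (big-endian halfwords/words):
+ *
+ *	0xa960, __NR_rt_sigreturn	l.ori r11,r0,__NR_rt_sigreturn
+ *	0x20000001			l.sys 1
+ *	0x15000000			l.nop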
*/ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); + err |= setup_sigcontext(regs, &frame->uc.uc_mcontext); + + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (err) + return -EFAULT; + + /* trampoline - the desired return ip is the retcode itself */ + return_ip = (unsigned long)&frame->retcode; + /* This is: + l.ori r11,r0,__NR_sigreturn + l.sys 1 + */ + err |= __put_user(0xa960, (short *)(frame->retcode + 0)); + err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); + err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); + err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); + + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; /* what we enter NOW */ + regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ + regs->gpr[3] = (unsigned long)ksig->sig; /* arg 1: signo */ + regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ + regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ + + /* actually move the usp to reflect the stacked frame */ + regs->sp = (unsigned long)frame; + + return 0; +} + +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + int ret; + + ret = setup_rt_frame(ksig, sigmask_to_save(), regs); + + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Also note that the regs structure given here as an argument, is the latest + * pushed pt_regs. It may or may not be the same as the first pushed registers + * when the initial usermode->kernelmode transition took place. Therefore + * we can use user_mode(regs) to see if we came directly from kernel or user + * mode below. + */ + +int do_signal(struct pt_regs *regs, int syscall) +{ + struct ksignal ksig; + unsigned long continue_addr = 0; + unsigned long restart_addr = 0; + unsigned long retval = 0; + int restart = 0; + + if (syscall) { + continue_addr = regs->pc; + restart_addr = continue_addr - 4; + retval = regs->gpr[11]; + + /* + * Setup syscall restart here so that a debugger will + * see the already changed PC. + */ + switch (retval) { + case -ERESTART_RESTARTBLOCK: + restart = -2; + /* Fall through */ + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + restart++; + regs->gpr[11] = regs->orig_gpr11; + regs->pc = restart_addr; + break; + } + } + + /* + * Get the signal to deliver. During the call to get_signal the + * debugger may change all our registers so we may need to revert + * the decision to restart the syscall; specifically, if the PC is + * changed, don't restart the syscall. 
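+ *
+ * restart_addr above is pc - 4 because the l.sys that entered the
+ * kernel is exactly one instruction word; rewinding the PC and
+ * putting orig_gpr11 back replays the call.  A read() returning
+ * -ERESTARTSYS, for example, re-executes the same l.sys with its
+ * original arguments unless the handler lacks SA_RESTART.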
+ */ + if (get_signal(&ksig)) { + if (unlikely(restart) && regs->pc == restart_addr) { + if (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK + || (retval == -ERESTARTSYS + && !(ksig.ka.sa.sa_flags & SA_RESTART))) { + /* No automatic restart */ + regs->gpr[11] = -EINTR; + regs->pc = continue_addr; + } + } + handle_signal(&ksig, regs); + } else { + /* no handler */ + restore_saved_sigmask(); + /* + * Restore pt_regs PC as syscall restart will be handled by + * kernel without return to userspace + */ + if (unlikely(restart) && regs->pc == restart_addr) { + regs->pc = continue_addr; + return restart; + } + } + + return 0; +} + +asmlinkage int +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) +{ + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = 0; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; +} diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c new file mode 100644 index 000000000..7d518ee8b --- /dev/null +++ b/arch/openrisc/kernel/smp.c @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * Based on arm64 and arc implementations + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/smp.h> +#include <linux/cpu.h> +#include <linux/sched.h> +#include <linux/irq.h> +#include <asm/cpuinfo.h> +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> +#include <asm/time.h> + +static void (*smp_cross_call)(const struct cpumask *, unsigned int); + +unsigned long secondary_release = -1; +struct thread_info *secondary_thread_info; + +enum ipi_msg_type { + IPI_WAKEUP, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, +}; + +static DEFINE_SPINLOCK(boot_lock); + +static void boot_secondary(unsigned int cpu, struct task_struct *idle) +{ + /* + * set synchronisation state between this boot processor + * and the secondary one + */ + spin_lock(&boot_lock); + + secondary_release = cpu; + smp_cross_call(cpumask_of(cpu), IPI_WAKEUP); + + /* + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ + spin_unlock(&boot_lock); +} + +void __init smp_prepare_boot_cpu(void) +{ +} + +void __init smp_init_cpus(void) +{ + int i; + + for (i = 0; i < NR_CPUS; i++) + set_cpu_possible(i, true); +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + int i; + + /* + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. 
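+ * smp_init_cpus() above marked every CPU up to NR_CPUS as
+ * possible; only the first max_cpus of those become present and
+ * thus eligible for __cpu_up().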
+ */ + for (i = 0; i < max_cpus; i++) + set_cpu_present(i, true); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +static DECLARE_COMPLETION(cpu_running); + +int __cpu_up(unsigned int cpu, struct task_struct *idle) +{ + if (smp_cross_call == NULL) { + pr_warn("CPU%u: failed to start, IPI controller missing", + cpu); + return -EIO; + } + + secondary_thread_info = task_thread_info(idle); + current_pgd[cpu] = init_mm.pgd; + + boot_secondary(cpu, idle); + if (!wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(1000))) { + pr_crit("CPU%u: failed to start\n", cpu); + return -EIO; + } + synchronise_count_master(cpu); + + return 0; +} + +asmlinkage __init void secondary_start_kernel(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + /* + * All kernel threads share the same mm context; grab a + * reference and switch to it. + */ + atomic_inc(&mm->mm_count); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + + pr_info("CPU%u: Booted secondary processor\n", cpu); + + setup_cpuinfo(); + openrisc_clockevent_init(); + + notify_cpu_starting(cpu); + + /* + * OK, now it's safe to let the boot CPU continue + */ + complete(&cpu_running); + + synchronise_count_slave(cpu); + set_cpu_online(cpu, true); + + local_irq_enable(); + + preempt_disable(); + /* + * OK, it's off to the idle thread for us + */ + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void handle_IPI(unsigned int ipi_msg) +{ + unsigned int cpu = smp_processor_id(); + + switch (ipi_msg) { + case IPI_WAKEUP: + break; + + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + + default: + WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg); + break; + } +} + +void smp_send_reschedule(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); +} + +static void stop_this_cpu(void *dummy) +{ + /* Remove this CPU */ + set_cpu_online(smp_processor_id(), false); + + local_irq_disable(); + /* CPU Doze */ + if (mfspr(SPR_UPR) & SPR_UPR_PMP) + mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); + /* If that didn't work, infinite loop */ + while (1) + ; +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, NULL, 0); +} + +/* not supported, yet */ +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) +{ + smp_cross_call = fn; +} + +void arch_send_call_function_single_ipi(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_CALL_FUNC); +} + +/* TLB flush operations - Performed on each CPU*/ +static inline void ipi_flush_tlb_all(void *ignored) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +/* + * FIXME: implement proper functionality instead of flush_tlb_all. + * *But*, as things currently stands, the local_tlb_flush_* functions will + * all boil down to local_tlb_flush_all anyway. 
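+ *
+ * A finer-grained version would hand the vma/address through the IPI
+ * instead; a hypothetical sketch (names invented, not implemented
+ * here):
+ *
+ *	struct tlb_args { struct vm_area_struct *ta_vma;
+ *			  unsigned long ta_addr; };
+ *	on_each_cpu(ipi_flush_tlb_page, &args, 1);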
+ */ +void flush_tlb_mm(struct mm_struct *mm) +{ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +/* Instruction cache invalidate - performed on each cpu */ +static void ipi_icache_page_inv(void *arg) +{ + struct page *page = arg; + + local_icache_page_inv(page); +} + +void smp_icache_page_inv(struct page *page) +{ + on_each_cpu(ipi_icache_page_inv, page, 1); +} +EXPORT_SYMBOL(smp_icache_page_inv); diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c new file mode 100644 index 000000000..54d38809e --- /dev/null +++ b/arch/openrisc/kernel/stacktrace.c @@ -0,0 +1,100 @@ +/* + * Stack trace utility for OpenRISC + * + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + * Losely based on work from sh and powerpc. + */ + +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/stacktrace.h> + +#include <asm/processor.h> +#include <asm/unwinder.h> + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +static void +save_stack_address(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = data; + + if (!reliable) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +void save_stack_trace(struct stack_trace *trace) +{ + unwind_stack(trace, (unsigned long *) &trace, save_stack_address); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +static void +save_stack_address_nosched(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = (struct stack_trace *)data; + + if (!reliable) + return; + + if (in_sched_functions(addr)) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + unsigned long *sp = NULL; + + if (!try_get_task_stack(tsk)) + return; + + if (tsk == current) + sp = (unsigned long *) &sp; + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } + + unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void +save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + unwind_stack(trace, (unsigned long *) regs->sp, + save_stack_address_nosched); +} +EXPORT_SYMBOL_GPL(save_stack_trace_regs); diff --git a/arch/openrisc/kernel/sync-timer.c b/arch/openrisc/kernel/sync-timer.c new file mode 100644 index 000000000..ed8d835ca --- /dev/null +++ b/arch/openrisc/kernel/sync-timer.c @@ -0,0 +1,120 @@ +/* + * OR1K timer synchronisation + * + * Based on work from MIPS implementation. + * + * All CPUs will have their count registers synchronised to the CPU0 next time + * value. This can cause a small timewarp for CPU0. 
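+ * (The handshake below runs NR_LOOPS times over two atomic counters;
+ * the master samples get_cycles() into initcount on the middle pass
+ * and both sides write it to SPR_TTCR via openrisc_timer_set() on the
+ * final pass.)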
All other CPU's should + * not have done anything significant (but they may have had interrupts + * enabled briefly - prom_smp_finish() should not be responsible for enabling + * interrupts...) + */ + +#include <linux/kernel.h> +#include <linux/irqflags.h> +#include <linux/cpumask.h> + +#include <asm/time.h> +#include <asm/timex.h> +#include <linux/atomic.h> +#include <asm/barrier.h> + +#include <asm/spr.h> + +static unsigned int initcount; +static atomic_t count_count_start = ATOMIC_INIT(0); +static atomic_t count_count_stop = ATOMIC_INIT(0); + +#define COUNTON 100 +#define NR_LOOPS 3 + +void synchronise_count_master(int cpu) +{ + int i; + unsigned long flags; + + pr_info("Synchronize counters for CPU %u: ", cpu); + + local_irq_save(flags); + + /* + * We loop a few times to get a primed instruction cache, + * then the last pass is more or less synchronised and + * the master and slaves each set their cycle counters to a known + * value all at once. This reduces the chance of having random offsets + * between the processors, and guarantees that the maximum + * delay between the cycle counters is never bigger than + * the latency of information-passing (cachelines) between + * two CPUs. + */ + + for (i = 0; i < NR_LOOPS; i++) { + /* slaves loop on '!= 2' */ + while (atomic_read(&count_count_start) != 1) + mb(); + atomic_set(&count_count_stop, 0); + smp_wmb(); + + /* Let the slave writes its count register */ + atomic_inc(&count_count_start); + + /* Count will be initialised to current timer */ + if (i == 1) + initcount = get_cycles(); + + /* + * Everyone initialises count in the last loop: + */ + if (i == NR_LOOPS-1) + openrisc_timer_set(initcount); + + /* + * Wait for slave to leave the synchronization point: + */ + while (atomic_read(&count_count_stop) != 1) + mb(); + atomic_set(&count_count_start, 0); + smp_wmb(); + atomic_inc(&count_count_stop); + } + /* Arrange for an interrupt in a short while */ + openrisc_timer_set_next(COUNTON); + + local_irq_restore(flags); + + /* + * i386 code reported the skew here, but the + * count registers were almost certainly out of sync + * so no point in alarming people + */ + pr_cont("done.\n"); +} + +void synchronise_count_slave(int cpu) +{ + int i; + + /* + * Not every cpu is online at the time this gets called, + * so we first wait for the master to say everyone is ready + */ + + for (i = 0; i < NR_LOOPS; i++) { + atomic_inc(&count_count_start); + while (atomic_read(&count_count_start) != 2) + mb(); + + /* + * Everyone initialises count in the last loop: + */ + if (i == NR_LOOPS-1) + openrisc_timer_set(initcount); + + atomic_inc(&count_count_stop); + while (atomic_read(&count_count_stop) != 2) + mb(); + } + /* Arrange for an interrupt in a short while */ + openrisc_timer_set_next(COUNTON); +} +#undef NR_LOOPS diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c new file mode 100644 index 000000000..e1f8ce8c7 --- /dev/null +++ b/arch/openrisc/kernel/sys_call_table.c @@ -0,0 +1,28 @@ +/* + * OpenRISC sys_call_table.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
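+ *
+ * The table is populated by redefining __SYSCALL() and re-including
+ * the unistd list, so every entry expands to a designated
+ * initializer.  E.g. (sketch of one expansion):
+ *
+ *	#define __SYSCALL(nr, call) [nr] = (call),
+ *	__SYSCALL(__NR_exit, sys_exit)  =>  [__NR_exit] = (sys_exit),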
+ * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/syscalls.h> +#include <linux/signal.h> +#include <linux/unistd.h> + +#include <asm/syscalls.h> + +#undef __SYSCALL +#define __SYSCALL(nr, call) [nr] = (call), + +void *sys_call_table[__NR_syscalls] = { +#include <asm/unistd.h> +}; diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c new file mode 100644 index 000000000..6baecea27 --- /dev/null +++ b/arch/openrisc/kernel/time.c @@ -0,0 +1,176 @@ +/* + * OpenRISC time.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/interrupt.h> +#include <linux/ftrace.h> + +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/irq.h> +#include <linux/io.h> + +#include <asm/cpuinfo.h> + +/* Test the timer ticks to count, used in sync routine */ +inline void openrisc_timer_set(unsigned long count) +{ + mtspr(SPR_TTCR, count); +} + +/* Set the timer to trigger in delta cycles */ +inline void openrisc_timer_set_next(unsigned long delta) +{ + u32 c; + + /* Read 32-bit counter value, add delta, mask off the low 28 bits. + * We're guaranteed delta won't be bigger than 28 bits because the + * generic timekeeping code ensures that for us. + */ + c = mfspr(SPR_TTCR); + c += delta; + c &= SPR_TTMR_TP; + + /* Set counter and enable interrupt. + * Keep timer in continuous mode always. + */ + mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c); +} + +static int openrisc_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + openrisc_timer_set_next(delta); + return 0; +} + +/* This is the clock event device based on the OR1K tick timer. + * As the timer is being used as a continuous clock-source (required for HR + * timers) we cannot enable the PERIODIC feature. The tick timer can run using + * one-shot events, so no problem. 
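+ *
+ * Note the 28-bit limit: SPR_TTMR_TP is the timer period field, so
+ * openrisc_timer_set_next() masks the target count with it and
+ * clockevents_config_and_register() caps max_delta at 0x0fffffff.
+ * At, say, 50 MHz that bounds a one-shot sleep to roughly 5.4 s.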
+ */ +DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer); + +void openrisc_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *evt = + &per_cpu(clockevent_openrisc_timer, cpu); + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu]; + + mtspr(SPR_TTMR, SPR_TTMR_CR); + +#ifdef CONFIG_SMP + evt->broadcast = tick_broadcast; +#endif + evt->name = "openrisc_timer_clockevent", + evt->features = CLOCK_EVT_FEAT_ONESHOT, + evt->rating = 300, + evt->set_next_event = openrisc_timer_set_next_event, + + evt->cpumask = cpumask_of(cpu); + + /* We only have 28 bits */ + clockevents_config_and_register(evt, cpuinfo->clock_frequency, + 100, 0x0fffffff); + +} + +static inline void timer_ack(void) +{ + /* Clear the IP bit and disable further interrupts */ + /* This can be done very simply... we just need to keep the timer + running, so just maintain the CR bits while clearing the rest + of the register + */ + mtspr(SPR_TTMR, SPR_TTMR_CR); +} + +/* + * The timer interrupt is mostly handled in generic code nowadays... this + * function just acknowledges the interrupt and fires the event handler that + * has been set on the clockevent device by the generic time management code. + * + * This function needs to be called by the timer exception handler and that's + * all the exception handler needs to do. + */ + +irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + unsigned int cpu = smp_processor_id(); + struct clock_event_device *evt = + &per_cpu(clockevent_openrisc_timer, cpu); + + timer_ack(); + + /* + * update_process_times() expects us to have called irq_enter(). + */ + irq_enter(); + evt->event_handler(evt); + irq_exit(); + + set_irq_regs(old_regs); + + return IRQ_HANDLED; +} + +/** + * Clocksource: Based on OpenRISC timer/counter + * + * This sets up the OpenRISC Tick Timer as a clock source. The tick timer + * is 32 bits wide and runs at the CPU clock frequency. + */ +static u64 openrisc_timer_read(struct clocksource *cs) +{ + return (u64) mfspr(SPR_TTCR); +} + +static struct clocksource openrisc_timer = { + .name = "openrisc_timer", + .rating = 200, + .read = openrisc_timer_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init openrisc_timer_init(void) +{ + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + + if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency)) + panic("failed to register clocksource"); + + /* Enable the incrementer: 'continuous' mode with interrupt disabled */ + mtspr(SPR_TTMR, SPR_TTMR_CR); + + return 0; +} + +void __init time_init(void) +{ + u32 upr; + + upr = mfspr(SPR_UPR); + if (!(upr & SPR_UPR_TTP)) + panic("Linux not supported on devices without tick timer"); + + openrisc_timer_init(); + openrisc_clockevent_init(); +} diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c new file mode 100644 index 000000000..d8981cbb8 --- /dev/null +++ b/arch/openrisc/kernel/traps.c @@ -0,0 +1,477 @@ +/* + * OpenRISC traps.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
+ * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Here we handle the break vectors not used by the system call + * mechanism, as well as some general stack/register dumping + * things. + * + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/kernel.h> +#include <linux/extable.h> +#include <linux/kmod.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/timer.h> +#include <linux/mm.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> + +#include <asm/segment.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/unwinder.h> +#include <asm/sections.h> + +int kstack_depth_to_print = 0x180; +int lwa_flag; +unsigned long __user *lwa_addr; + +void print_trace(void *data, unsigned long addr, int reliable) +{ + pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ", + (void *) addr); +} + +/* displays a short stack trace */ +void show_stack(struct task_struct *task, unsigned long *esp) +{ + if (esp == NULL) + esp = (unsigned long *)&esp; + + pr_emerg("Call trace:\n"); + unwind_stack(NULL, esp, print_trace); +} + +void show_trace_task(struct task_struct *tsk) +{ + /* + * TODO: SysRq-T trace dump... + */ +} + +void show_registers(struct pt_regs *regs) +{ + int i; + int in_kernel = 1; + unsigned long esp; + + esp = (unsigned long)(regs->sp); + if (user_mode(regs)) + in_kernel = 0; + + printk("CPU #: %d\n" + " PC: %08lx SR: %08lx SP: %08lx\n", + smp_processor_id(), regs->pc, regs->sr, regs->sp); + printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", + 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); + printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", + regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); + printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", + regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); + printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", + regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); + printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", + regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); + printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", + regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); + printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", + regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); + printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", + regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); + printk(" RES: %08lx oGPR11: %08lx\n", + regs->gpr[11], regs->orig_gpr11); + + printk("Process %s (pid: %d, stackpage=%08lx)\n", + current->comm, current->pid, (unsigned long)current); + /* + * When in-kernel, we also print out the stack and code at the + * time of the fault.. 
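+ * The code dump covers 24 bytes either side of the faulting PC and
+ * fetches them with __get_user(), so a bogus or userspace PC prints
+ * "Bad PC value." instead of faulting again.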
+ */ + if (in_kernel) { + + printk("\nStack: "); + show_stack(NULL, (unsigned long *)esp); + + printk("\nCode: "); + if (regs->pc < PAGE_OFFSET) + goto bad; + + for (i = -24; i < 24; i++) { + unsigned char c; + if (__get_user(c, &((unsigned char *)regs->pc)[i])) { +bad: + printk(" Bad PC value."); + break; + } + + if (i == 0) + printk("(%02x) ", c); + else + printk("%02x ", c); + } + } + printk("\n"); +} + +void nommu_dump_state(struct pt_regs *regs, + unsigned long ea, unsigned long vector) +{ + int i; + unsigned long addr, stack = regs->sp; + + printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector); + + printk("CPU #: %d\n" + " PC: %08lx SR: %08lx SP: %08lx\n", + 0, regs->pc, regs->sr, regs->sp); + printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", + 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); + printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", + regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); + printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", + regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); + printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", + regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); + printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", + regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); + printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", + regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); + printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", + regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); + printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", + regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); + printk(" RES: %08lx oGPR11: %08lx\n", + regs->gpr[11], regs->orig_gpr11); + + printk("Process %s (pid: %d, stackpage=%08lx)\n", + ((struct task_struct *)(__pa(current)))->comm, + ((struct task_struct *)(__pa(current)))->pid, + (unsigned long)current); + + printk("\nStack: "); + printk("Stack dump [0x%08lx]:\n", (unsigned long)stack); + for (i = 0; i < kstack_depth_to_print; i++) { + if (((long)stack & (THREAD_SIZE - 1)) == 0) + break; + stack++; + + printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4, + *((unsigned long *)(__pa(stack)))); + } + printk("\n"); + + printk("Call Trace: "); + i = 1; + while (((long)stack & (THREAD_SIZE - 1)) != 0) { + addr = *((unsigned long *)__pa(stack)); + stack++; + + if (kernel_text_address(addr)) { + if (i && ((i % 6) == 0)) + printk("\n "); + printk(" [<%08lx>]", addr); + i++; + } + } + printk("\n"); + + printk("\nCode: "); + + for (i = -24; i < 24; i++) { + unsigned char c; + c = ((unsigned char *)(__pa(regs->pc)))[i]; + + if (i == 0) + printk("(%02x) ", c); + else + printk("%02x ", c); + } + printk("\n"); +} + +/* This is normally the 'Oops' routine */ +void die(const char *str, struct pt_regs *regs, long err) +{ + + console_verbose(); + printk("\n%s#: %04lx\n", str, err & 0xffff); + show_registers(regs); +#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION + printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); + + /* shut down interrupts */ + local_irq_disable(); + + __asm__ __volatile__("l.nop 1"); + do {} while (1); +#endif + do_exit(SIGSEGV); +} + +/* This is normally the 'Oops' routine */ +void die_if_kernel(const char *str, struct pt_regs *regs, long err) +{ + if (user_mode(regs)) + return; + + die(str, regs, err); +} + +void unhandled_exception(struct pt_regs *regs, int ea, int vector) +{ + printk("Unable to handle exception at EA =0x%x, 
vector 0x%x", + ea, vector); + die("Oops", regs, 9); +} + +void __init trap_init(void) +{ + /* Nothing needs to be done */ +} + +asmlinkage void do_trap(struct pt_regs *regs, unsigned long address) +{ + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)address, current); + + regs->pc += 4; +} + +asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) +{ + if (user_mode(regs)) { + /* Send a SIGBUS */ + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address, current); + } else { + printk("KERNEL: Unaligned Access 0x%.8lx\n", address); + show_registers(regs); + die("Die:", regs, address); + } + +} + +asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address) +{ + if (user_mode(regs)) { + /* Send a SIGBUS */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); + } else { /* Kernel mode */ + printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); + show_registers(regs); + die("Die:", regs, address); + } +} + +static inline int in_delay_slot(struct pt_regs *regs) +{ +#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX + /* No delay slot flag, do the old way */ + unsigned int op, insn; + + insn = *((unsigned int *)regs->pc); + op = insn >> 26; + switch (op) { + case 0x00: /* l.j */ + case 0x01: /* l.jal */ + case 0x03: /* l.bnf */ + case 0x04: /* l.bf */ + case 0x11: /* l.jr */ + case 0x12: /* l.jalr */ + return 1; + default: + return 0; + } +#else + return mfspr(SPR_SR) & SPR_SR_DSX; +#endif +} + +static inline void adjust_pc(struct pt_regs *regs, unsigned long address) +{ + int displacement; + unsigned int rb, op, jmp; + + if (unlikely(in_delay_slot(regs))) { + /* In delay slot, instruction at pc is a branch, simulate it */ + jmp = *((unsigned int *)regs->pc); + + displacement = sign_extend32(((jmp) & 0x3ffffff) << 2, 27); + rb = (jmp & 0x0000ffff) >> 11; + op = jmp >> 26; + + switch (op) { + case 0x00: /* l.j */ + regs->pc += displacement; + return; + case 0x01: /* l.jal */ + regs->pc += displacement; + regs->gpr[9] = regs->pc + 8; + return; + case 0x03: /* l.bnf */ + if (regs->sr & SPR_SR_F) + regs->pc += 8; + else + regs->pc += displacement; + return; + case 0x04: /* l.bf */ + if (regs->sr & SPR_SR_F) + regs->pc += displacement; + else + regs->pc += 8; + return; + case 0x11: /* l.jr */ + regs->pc = regs->gpr[rb]; + return; + case 0x12: /* l.jalr */ + regs->pc = regs->gpr[rb]; + regs->gpr[9] = regs->pc + 8; + return; + default: + break; + } + } else { + regs->pc += 4; + } +} + +static inline void simulate_lwa(struct pt_regs *regs, unsigned long address, + unsigned int insn) +{ + unsigned int ra, rd; + unsigned long value; + unsigned long orig_pc; + long imm; + + const struct exception_table_entry *entry; + + orig_pc = regs->pc; + adjust_pc(regs, address); + + ra = (insn >> 16) & 0x1f; + rd = (insn >> 21) & 0x1f; + imm = (short)insn; + lwa_addr = (unsigned long __user *)(regs->gpr[ra] + imm); + + if ((unsigned long)lwa_addr & 0x3) { + do_unaligned_access(regs, address); + return; + } + + if (get_user(value, lwa_addr)) { + if (user_mode(regs)) { + force_sig(SIGSEGV, current); + return; + } + + if ((entry = search_exception_tables(orig_pc))) { + regs->pc = entry->fixup; + return; + } + + /* kernel access in kernel space, load it directly */ + value = *((unsigned long *)lwa_addr); + } + + lwa_flag = 1; + regs->gpr[rd] = value; +} + +static inline void simulate_swa(struct pt_regs *regs, unsigned long address, + unsigned int insn) +{ + unsigned long __user *vaddr; + unsigned long orig_pc; + unsigned int ra, rb; + long imm; + + const struct 
exception_table_entry *entry; + + orig_pc = regs->pc; + adjust_pc(regs, address); + + ra = (insn >> 16) & 0x1f; + rb = (insn >> 11) & 0x1f; + imm = (short)(((insn & 0x2200000) >> 10) | (insn & 0x7ff)); + vaddr = (unsigned long __user *)(regs->gpr[ra] + imm); + + if (!lwa_flag || vaddr != lwa_addr) { + regs->sr &= ~SPR_SR_F; + return; + } + + if ((unsigned long)vaddr & 0x3) { + do_unaligned_access(regs, address); + return; + } + + if (put_user(regs->gpr[rb], vaddr)) { + if (user_mode(regs)) { + force_sig(SIGSEGV, current); + return; + } + + if ((entry = search_exception_tables(orig_pc))) { + regs->pc = entry->fixup; + return; + } + + /* kernel access in kernel space, store it directly */ + *((unsigned long *)vaddr) = regs->gpr[rb]; + } + + lwa_flag = 0; + regs->sr |= SPR_SR_F; +} + +#define INSN_LWA 0x1b +#define INSN_SWA 0x33 + +asmlinkage void do_illegal_instruction(struct pt_regs *regs, + unsigned long address) +{ + unsigned int op; + unsigned int insn = *((unsigned int *)address); + + op = insn >> 26; + + switch (op) { + case INSN_LWA: + simulate_lwa(regs, address, insn); + return; + + case INSN_SWA: + simulate_swa(regs, address, insn); + return; + + default: + break; + } + + if (user_mode(regs)) { + /* Send a SIGILL */ + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address, current); + } else { /* Kernel mode */ + printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", + address); + show_registers(regs); + die("Die:", regs, address); + } +} diff --git a/arch/openrisc/kernel/unwinder.c b/arch/openrisc/kernel/unwinder.c new file mode 100644 index 000000000..8ae15c2c1 --- /dev/null +++ b/arch/openrisc/kernel/unwinder.c @@ -0,0 +1,105 @@ +/* + * OpenRISC unwinder.c + * + * Reusable arch specific api for unwinding stacks. + * + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/sched/task_stack.h> +#include <linux/kernel.h> + +#include <asm/unwinder.h> + +#ifdef CONFIG_FRAME_POINTER +struct or1k_frameinfo { + unsigned long *fp; + unsigned long ra; + unsigned long top; +}; + +/* + * Verify a frameinfo structure. The return address should be a valid text + * address. The frame pointer may be null if its the last frame, otherwise + * the frame pointer should point to a location in the stack after the the + * top of the next frame up. + */ +static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo) +{ + return (frameinfo->fp == NULL || + (!kstack_end(frameinfo->fp) && + frameinfo->fp > &frameinfo->top)) && + __kernel_text_address(frameinfo->ra); +} + +/* + * Create a stack trace doing scanning which is frame pointer aware. We can + * get reliable stack traces by matching the previously found frame + * pointer with the top of the stack address every time we find a valid + * or1k_frameinfo. + * + * Ideally the stack parameter will be passed as FP, but it can not be + * guaranteed. Therefore we scan each address looking for the first sign + * of a return address. + * + * The OpenRISC stack frame looks something like the following. The + * location SP is held in r1 and location FP is held in r2 when frame pointers + * enabled. 
+ * + * SP -> (top of stack) + * - (callee saved registers) + * - (local variables) + * FP-8 -> previous FP \ + * FP-4 -> return address |- or1k_frameinfo + * FP -> (previous top of stack) / + */ +void unwind_stack(void *data, unsigned long *stack, + void (*trace)(void *data, unsigned long addr, int reliable)) +{ + unsigned long *next_fp = NULL; + struct or1k_frameinfo *frameinfo = NULL; + int reliable = 0; + + while (!kstack_end(stack)) { + frameinfo = container_of(stack, + struct or1k_frameinfo, + top); + + if (__kernel_text_address(frameinfo->ra)) { + if (or1k_frameinfo_valid(frameinfo) && + (next_fp == NULL || + next_fp == &frameinfo->top)) { + reliable = 1; + next_fp = frameinfo->fp; + } else + reliable = 0; + + trace(data, frameinfo->ra, reliable); + } + stack++; + } +} + +#else /* CONFIG_FRAME_POINTER */ + +/* + * Create a stack trace by doing a simple scan treating all text addresses + * as return addresses. + */ +void unwind_stack(void *data, unsigned long *stack, + void (*trace)(void *data, unsigned long addr, int reliable)) +{ + unsigned long addr; + + while (!kstack_end(stack)) { + addr = *stack++; + if (__kernel_text_address(addr)) + trace(data, addr, 0); + } +} +#endif /* CONFIG_FRAME_POINTER */ + diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h new file mode 100644 index 000000000..bdea46c61 --- /dev/null +++ b/arch/openrisc/kernel/vmlinux.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OPENRISC_VMLINUX_H_ +#define __OPENRISC_VMLINUX_H_ + +#ifdef CONFIG_BLK_DEV_INITRD +extern char __initrd_start, __initrd_end; +#endif + +#endif diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S new file mode 100644 index 000000000..953bdcd54 --- /dev/null +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -0,0 +1,126 @@ +/* + * OpenRISC vmlinux.lds.S + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * ld script for OpenRISC architecture + */ + +/* TODO + * - clean up __offset & stuff + * - change all 8192 alignment to PAGE !!! + * - recheck if all alignments are really needed + */ + +# define LOAD_OFFSET PAGE_OFFSET +# define LOAD_BASE PAGE_OFFSET + +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/thread_info.h> +#include <asm-generic/vmlinux.lds.h> + +#ifdef __OR1K__ +#define __OUTPUT_FORMAT "elf32-or1k" +#else +#define __OUTPUT_FORMAT "elf32-or32" +#endif + +OUTPUT_FORMAT(__OUTPUT_FORMAT, __OUTPUT_FORMAT, __OUTPUT_FORMAT) +jiffies = jiffies_64 + 4; + +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . = LOAD_BASE ; + + _text = .; + + /* _s_kernel_ro must be page aligned */ + . 
= ALIGN(PAGE_SIZE); + _s_kernel_ro = .; + + .text : AT(ADDR(.text) - LOAD_OFFSET) + { + _stext = .; + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + *(.text.__*) + _etext = .; + } + /* TODO: Check if fixup and text.__* are really necessary + * fixup is definitely necessary + */ + + _sdata = .; + + /* Page alignment required for RO_DATA_SECTION */ + RO_DATA_SECTION(PAGE_SIZE) + _e_kernel_ro = .; + + /* Whatever comes after _e_kernel_ro had better be page-aligend, too */ + + /* 32 here is cacheline size... recheck this */ + RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE) + + _edata = .; + + EXCEPTION_TABLE(4) + NOTES + + /* Init code and data */ + . = ALIGN(PAGE_SIZE); + __init_begin = .; + + HEAD_TEXT_SECTION + + /* Page aligned */ + INIT_TEXT_SECTION(PAGE_SIZE) + + /* Align __setup_start on 16 byte boundary */ + INIT_DATA_SECTION(16) + + PERCPU_SECTION(L1_CACHE_BYTES) + + __init_end = .; + + . = ALIGN(PAGE_SIZE); + .initrd : AT(ADDR(.initrd) - LOAD_OFFSET) + { + __initrd_start = .; + *(.initrd) + __initrd_end = .; + FILL (0); + . = ALIGN (PAGE_SIZE); + } + + __vmlinux_end = .; /* last address of the physical file */ + + BSS_SECTION(0, 0, 0x20) + + _end = .; + + /* Throw in the debugging sections */ + STABS_DEBUG + DWARF_DEBUG + + /* Sections to be discarded -- must be last */ + DISCARDS +} diff --git a/arch/openrisc/lib/Makefile b/arch/openrisc/lib/Makefile new file mode 100644 index 000000000..17d9d37f3 --- /dev/null +++ b/arch/openrisc/lib/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for or32 specific library files.. +# + +obj-y := delay.o string.o memset.o memcpy.o diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c new file mode 100644 index 000000000..a92bd621a --- /dev/null +++ b/arch/openrisc/lib/delay.c @@ -0,0 +1,61 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. 
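
Editor's note on the linker script above: the line "jiffies = jiffies_64 + 4;" aliases the 32-bit jiffies symbol to the low word of jiffies_64, which on this big-endian 32-bit target sits at byte offset 4. A small host-independent sketch of that layout (the sample value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t jiffies_64 = 0x0000000112345678ull;
        unsigned char bytes[8];

        /* Store jiffies_64 in big-endian byte order, as OpenRISC would. */
        for (int i = 0; i < 8; i++)
            bytes[i] = (unsigned char)(jiffies_64 >> (56 - 8 * i));

        /* The low 32 bits begin at offset 4: hence "jiffies_64 + 4". */
        uint32_t low = ((uint32_t)bytes[4] << 24) | ((uint32_t)bytes[5] << 16) |
                       ((uint32_t)bytes[6] << 8) | bytes[7];

        printf("low word at offset 4: 0x%08x\n", low);
        return 0;
    }
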
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation
+ *
+ * Precise Delay Loops
+ */

+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <asm/param.h>
+#include <asm/delay.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+
+int read_current_timer(unsigned long *timer_value)
+{
+	*timer_value = get_cycles();
+	return 0;
+}
+
+void __delay(unsigned long cycles)
+{
+	cycles_t start = get_cycles();
+
+	while ((get_cycles() - start) < cycles)
+		cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+	unsigned long long loops;
+
+	loops = (unsigned long long)xloops * loops_per_jiffy * HZ;
+
+	__delay(loops >> 32);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
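
Editor's note: the 0x10C7 constant above is ceil(2^32 / 10^6), so that usecs * 0x10C7 * loops_per_jiffy * HZ, shifted right by 32, yields the number of delay loops. A sanity check in plain C (the loops_per_jiffy and HZ values are illustrative, not from the source):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* ceil(2^32 / 10^6) = 4295 = 0x10C7 */
        uint32_t uconst = (uint32_t)((4294967296ull + 999999) / 1000000);
        printf("2^32/10^6 rounded up = 0x%x\n", (unsigned)uconst);

        /* Illustrative calibration values, not taken from the source. */
        unsigned long loops_per_jiffy = 500000;  /* e.g. with HZ = 100 */
        unsigned long hz = 100;
        unsigned long usecs = 10;

        uint64_t loops = (uint64_t)(usecs * 0x10C7ul) * loops_per_jiffy * hz;
        printf("__delay(%llu) for %lu us\n",
               (unsigned long long)(loops >> 32), usecs);
        return 0;
    }

With loops_per_jiffy * HZ = 5e7 loops per second, 10 us indeed maps to 500 loops.
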
diff --git a/arch/openrisc/lib/memcpy.c b/arch/openrisc/lib/memcpy.c
new file mode 100644
index 000000000..fe2177628
--- /dev/null
+++ b/arch/openrisc/lib/memcpy.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/openrisc/lib/memcpy.c
+ *
+ * Optimized memory copy routines for openrisc.  These are mostly copied
+ * from other sources but slightly extended based on ideas discussed in
+ * #openrisc.
+ *
+ * The word unroll implementation is an extension to the arm byte
+ * unrolled implementation, but using word copies (if things are
+ * properly aligned)
+ *
+ * The great arm loop unroll algorithm can be found at:
+ *  arch/arm/boot/compressed/string.c
+ */
+
+#include <linux/export.h>
+
+#include <linux/string.h>
+
+#ifdef CONFIG_OR1K_1200
+/*
+ * Do memcpy with word copies and loop unrolling. This gives the
+ * best performance on the OR1200 and MOR1KX architectures
+ */
+void *memcpy(void *dest, __const void *src, __kernel_size_t n)
+{
+	int i = 0;
+	unsigned char *d, *s;
+	uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src;
+
+	/* If both source and dest are word aligned copy words */
+	if (!((unsigned int)dest_w & 3) && !((unsigned int)src_w & 3)) {
+		/* Copy 32 bytes per loop */
+		for (i = n >> 5; i > 0; i--) {
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+		}
+
+		if (n & 1 << 4) {
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+		}
+
+		if (n & 1 << 3) {
+			*dest_w++ = *src_w++;
+			*dest_w++ = *src_w++;
+		}
+
+		if (n & 1 << 2)
+			*dest_w++ = *src_w++;
+
+		d = (unsigned char *)dest_w;
+		s = (unsigned char *)src_w;
+
+	} else {
+		d = (unsigned char *)dest_w;
+		s = (unsigned char *)src_w;
+
+		for (i = n >> 3; i > 0; i--) {
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+		}
+
+		if (n & 1 << 2) {
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+			*d++ = *s++;
+		}
+	}
+
+	if (n & 1 << 1) {
+		*d++ = *s++;
+		*d++ = *s++;
+	}
+
+	if (n & 1)
+		*d++ = *s++;
+
+	return dest;
+}
+#else
+/*
+ * Use word copies but no loop unrolling as we cannot assume there
+ * will be benefits on the architecture
+ */
+void *memcpy(void *dest, __const void *src, __kernel_size_t n)
+{
+	unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src;
+	uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src;
+
+	/* If both source and dest are word aligned copy words */
+	if (!((unsigned int)dest_w & 3) && !((unsigned int)src_w & 3)) {
+		for (; n >= 4; n -= 4)
+			*dest_w++ = *src_w++;
+	}
+
+	d = (unsigned char *)dest_w;
+	s = (unsigned char *)src_w;
+
+	/* For remaining or if not aligned, copy bytes */
+	for (; n >= 1; n -= 1)
+		*d++ = *s++;
+
+	return dest;
+
+}
+#endif
+
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/openrisc/lib/memset.S b/arch/openrisc/lib/memset.S
new file mode 100644
index 000000000..92cc2eac2
--- /dev/null
+++ b/arch/openrisc/lib/memset.S
@@ -0,0 +1,98 @@
+/*
+ * OpenRISC memset.S
+ *
+ * Hand-optimized assembler version of memset for OpenRISC.
+ * Algorithm inspired by several other arch-specific memset routines
+ * in the kernel tree
+ *
+ * Copyright (C) 2015 Olof Kindgren <olof.kindgren@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
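
Editor's note: memset, which follows, widens the fill byte c into a full word cccc with two shift/or pairs before entering its word-store loop. The same trick as a C sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Same widening as the two l.slli/l.or pairs in memset.S. */
    static uint32_t widen(uint8_t c)
    {
        uint32_t w = c;     /* 000c */

        w |= w << 16;       /* 0c0c */
        w |= w << 8;        /* cccc */
        return w;
    }

    int main(void)
    {
        printf("0x%08x\n", widen(0xAB));   /* prints 0xabababab */
        return 0;
    }
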
+
+	.global memset
+	.type	memset, @function
+memset:
+	/* arguments:
+	 * r3 = *s
+	 * r4 = c
+	 * r5 = n
+	 * r13, r15, r17, r19 used as temp regs
+	*/
+
+	/* Exit if n == 0 */
+	l.sfeqi		r5, 0
+	l.bf		4f
+
+	/* Truncate c to char */
+	l.andi		r13, r4, 0xff
+
+	/* Skip word extension if c is 0 */
+	l.sfeqi		r13, 0
+	l.bf		1f
+	/* Check for at least two whole words (8 bytes) */
+	l.sfleui	r5, 7
+
+	/* Extend char c to 32-bit word cccc in r13 */
+	l.slli		r15, r13, 16  // r13 = 000c, r15 = 0c00
+	l.or		r13, r13, r15 // r13 = 0c0c, r15 = 0c00
+	l.slli		r15, r13, 8   // r13 = 0c0c, r15 = c0c0
+	l.or		r13, r13, r15 // r13 = cccc, r15 = c0c0
+
+1:	l.addi		r19, r3, 0 // Set r19 = src
+	/* Jump to byte copy loop if less than two words */
+	l.bf		3f
+	l.or		r17, r5, r0 // Set r17 = n
+
+	/* Mask out two LSBs to check alignment */
+	l.andi		r15, r3, 0x3
+
+	/* lsb == 00, jump to word copy loop */
+	l.sfeqi		r15, 0
+	l.bf		2f
+	l.addi		r19, r3, 0 // Set r19 = src
+
+	/* lsb == 01,10 or 11 */
+	l.sb		0(r3), r13 // *src = c
+	l.addi		r17, r17, -1 // Decrease n
+
+	l.sfeqi		r15, 3
+	l.bf		2f
+	l.addi		r19, r3, 1 // src += 1
+
+	/* lsb == 01 or 10 */
+	l.sb		1(r3), r13 // *(src+1) = c
+	l.addi		r17, r17, -1 // Decrease n
+
+	l.sfeqi		r15, 2
+	l.bf		2f
+	l.addi		r19, r3, 2 // src += 2
+
+	/* lsb == 01 */
+	l.sb		2(r3), r13 // *(src+2) = c
+	l.addi		r17, r17, -1 // Decrease n
+	l.addi		r19, r3, 3 // src += 3
+
+	/* Word copy loop */
+2:	l.sw		0(r19), r13 // *src = cccc
+	l.addi		r17, r17, -4 // Decrease n
+	l.sfgeui	r17, 4
+	l.bf		2b
+	l.addi		r19, r19, 4 // Increase src
+
+	/* When n > 0, copy the remaining bytes, otherwise jump to exit */
+	l.sfeqi		r17, 0
+	l.bf		4f
+
+	/* Byte copy loop */
+3:	l.addi		r17, r17, -1 // Decrease n
+	l.sb		0(r19), r13 // *src = c
+	l.sfnei		r17, 0
+	l.bf		3b
+	l.addi		r19, r19, 1 // Increase src
+
+4:	l.jr		r9
+	l.ori		r11, r3, 0
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
new file mode 100644
index 000000000..c09fee7de
--- /dev/null
+++ b/arch/openrisc/lib/string.S
@@ -0,0 +1,105 @@
+/*
+ * OpenRISC string.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+
+	/*
+	 * This can be optimized by using gcc inline assembly with
+	 * proper constraints (no need to save the argument registers...)
+	 *
+	 */
+
+
+/*
+ *
+ * int __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ *
+ * NOTE: it returns number of bytes NOT copied !!!
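
Editor's note on the contract spelled out here: __copy_tofrom_user returns the number of bytes it failed to copy, so zero means complete success. A toy userspace model of that convention (toy_copy and the forced fault point are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in with the same contract: returns bytes NOT copied. */
    static unsigned long toy_copy(void *to, const void *from,
                                  unsigned long size, unsigned long fault_at)
    {
        unsigned long ok = size < fault_at ? size : fault_at;

        memcpy(to, from, ok);
        return size - ok;   /* 0 on full success */
    }

    int main(void)
    {
        char src[16] = "hello, openrisc";
        char dst[16] = {0};

        unsigned long left = toy_copy(dst, src, sizeof(src), 8);
        if (left)
            printf("short copy: %lu byte(s) not copied\n", left);
        else
            printf("copied all\n");
        return 0;
    }
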
+ * + */ + .global __copy_tofrom_user +__copy_tofrom_user: + l.addi r1,r1,-12 + l.sw 0(r1),r6 + l.sw 4(r1),r4 + l.sw 8(r1),r3 + + l.addi r11,r5,0 +2: l.sfeq r11,r0 + l.bf 1f + l.addi r11,r11,-1 +8: l.lbz r6,0(r4) +9: l.sb 0(r3),r6 + l.addi r3,r3,1 + l.j 2b + l.addi r4,r4,1 +1: + l.addi r11,r11,1 // r11 holds the return value + + l.lwz r6,0(r1) + l.lwz r4,4(r1) + l.lwz r3,8(r1) + l.jr r9 + l.addi r1,r1,12 + + .section .fixup, "ax" +99: + l.j 1b + l.nop + .previous + + .section __ex_table, "a" + .long 8b, 99b // read fault + .long 9b, 99b // write fault + .previous + +/* + * unsigned long clear_user(void *addr, unsigned long size) ; + * + * NOTE: it returns number of bytes NOT cleared !!! + */ + .global __clear_user +__clear_user: + l.addi r1,r1,-8 + l.sw 0(r1),r4 + l.sw 4(r1),r3 + +2: l.sfeq r4,r0 + l.bf 1f + l.addi r4,r4,-1 +9: l.sb 0(r3),r0 + l.j 2b + l.addi r3,r3,1 + +1: + l.addi r11,r4,1 + + l.lwz r4,0(r1) + l.lwz r3,4(r1) + l.jr r9 + l.addi r1,r1,8 + + .section .fixup, "ax" +99: + l.j 1b + l.nop + .previous + + .section __ex_table, "a" + .long 9b, 99b // write fault + .previous diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile new file mode 100644 index 000000000..a31b2a42e --- /dev/null +++ b/arch/openrisc/mm/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the linux openrisc-specific parts of the memory manager. +# + +obj-y := fault.o cache.o tlb.o init.o ioremap.o diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c new file mode 100644 index 000000000..4272d9123 --- /dev/null +++ b/arch/openrisc/mm/cache.c @@ -0,0 +1,61 @@ +/* + * OpenRISC cache.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <asm/spr.h> +#include <asm/spr_defs.h> +#include <asm/cache.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +static __always_inline void cache_loop(struct page *page, const unsigned int reg) +{ + unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; + unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); + + while (line < paddr + PAGE_SIZE) { + mtspr(reg, line); + line += L1_CACHE_BYTES; + } +} + +void local_dcache_page_flush(struct page *page) +{ + cache_loop(page, SPR_DCBFR); +} +EXPORT_SYMBOL(local_dcache_page_flush); + +void local_icache_page_inv(struct page *page) +{ + cache_loop(page, SPR_ICBIR); +} +EXPORT_SYMBOL(local_icache_page_inv); + +void update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) +{ + unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT; + struct page *page = pfn_to_page(pfn); + int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); + + /* + * Since icaches do not snoop for updated data on OpenRISC, we + * must write back and invalidate any dirty pages manually. We + * can skip data pages, since they will not end up in icaches. 
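
Editor's note: cache_loop() in mm/cache.c above issues one SPR write per cache line of the page. A sketch of the same iteration, assuming the 32-byte line size mentioned in the linker script and OpenRISC's 8 KiB pages:

    #include <stdio.h>

    #define L1_CACHE_BYTES  32      /* assumed, per the lds comment */
    #define PAGE_SIZE       8192    /* OpenRISC uses 8 KiB pages */

    int main(void)
    {
        unsigned long paddr = 0x2000;   /* page-aligned physical address */
        unsigned long line = paddr & ~(unsigned long)(L1_CACHE_BYTES - 1);
        unsigned int n = 0;

        /* One flush/invalidate SPR write per line, as in cache_loop(). */
        while (line < paddr + PAGE_SIZE) {
            n++;                        /* stands in for mtspr(reg, line) */
            line += L1_CACHE_BYTES;
        }
        printf("%u cache-line operations per page\n", n);   /* 256 */
        return 0;
    }
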
+ */
+	if ((vma->vm_flags & VM_EXEC) && dirty)
+		sync_icache_dcache(page);
+}
+
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
new file mode 100644
index 000000000..dc4dbafc1
--- /dev/null
+++ b/arch/openrisc/mm/fault.c
@@ -0,0 +1,354 @@
+/*
+ * OpenRISC fault.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/extable.h>
+#include <linux/sched/signal.h>
+
+#include <linux/uaccess.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define NUM_TLB_ENTRIES 64
+#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
+
+unsigned long pte_misses;	/* updated by do_page_fault() */
+unsigned long pte_errors;	/* updated by do_page_fault() */
+
+/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
+ *            - also look into include/asm-or32/mmu_context.h
+ */
+volatile pgd_t *current_pgd[NR_CPUS];
+
+extern void die(char *, struct pt_regs *, long);
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+			      unsigned long vector, int write_acc)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	int si_code;
+	vm_fault_t fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	tsk = current;
+
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 *
+	 * NOTE2: This is done so that, when updating the vmalloc
+	 * mappings we don't have to walk all processes' pgdirs and
+	 * add the high mappings all at once. Instead we do it as they
+	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+	 * bit set so sometimes the TLB can use a lingering entry.
+	 *
+	 * This verifies that the fault happens in kernel space
+	 * and that the fault was not a protection error.
+	 */
+
+	if (address >= VMALLOC_START &&
+	    (vector != 0x300 && vector != 0x400) &&
+	    !user_mode(regs))
+		goto vmalloc_fault;
+
+	/* If exceptions were enabled, we can reenable them here */
+	if (user_mode(regs)) {
+		/* Exception was in userspace: reenable interrupts */
+		local_irq_enable();
+		flags |= FAULT_FLAG_USER;
+	} else {
+		/* If exception was in a syscall, then IRQ's may have
+		 * been enabled or disabled.  If they were enabled,
+		 * reenable them.
+		 */
+		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
+			local_irq_enable();
+	}
+
+	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
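
Editor's note: the interrupt re-enable test in do_page_fault() masks the saved SR bitwise; a logical && there would degenerate to "sr != 0", which is true for any live SR. A minimal demonstration (SR bit values as in the OR1K SPR definitions, shown here for illustration):

    #include <stdio.h>

    #define SPR_SR_TEE  0x00000002  /* tick timer exceptions enabled */
    #define SPR_SR_IEE  0x00000004  /* interrupts enabled */

    int main(void)
    {
        unsigned long sr = 0x00008001;  /* SM and FO set, IEE/TEE clear */

        /* Bitwise test: were interrupts or tick exceptions enabled? */
        printf("mask test:    %d\n", (sr & (SPR_SR_IEE | SPR_SR_TEE)) != 0);

        /* Logical test reduces to "sr != 0", true for any live SR. */
        printf("logical test: %d\n", sr && (SPR_SR_IEE | SPR_SR_TEE));
        return 0;
    }
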
+ */ + + if (in_interrupt() || !mm) + goto no_context; + +retry: + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + + if (!vma) + goto bad_area; + + if (vma->vm_start <= address) + goto good_area; + + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + + if (user_mode(regs)) { + /* + * accessing the stack below usp is always a bug. + * we get page-aligned addresses so we can only check + * if we're within a page from usp, but that might be + * enough to catch brutal errors at least. + */ + if (address + PAGE_SIZE < regs->sp) + goto bad_area; + } + if (expand_stack(vma, address)) + goto bad_area; + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ + +good_area: + si_code = SEGV_ACCERR; + + /* first do some preliminary protection checks */ + + if (write_acc) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + flags |= FAULT_FLAG_WRITE; + } else { + /* not present */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; + } + + /* are we trying to execute nonexecutable area */ + if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC)) + goto bad_area; + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + + fault = handle_mm_fault(vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + /*RGD modeled on Cris */ + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ + +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + + /* User mode accesses just cause a SIGSEGV */ + + if (user_mode(regs)) { + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); + return; + } + +no_context: + + /* Are we prepared to handle this kernel fault? + * + * (The kernel has valid exception-points in the source + * when it acesses user-memory. When it fails in one + * of those points, we find it in a table and do a jump + * to some fixup code that loads an appropriate error + * code) + */ + + { + const struct exception_table_entry *entry; + + __asm__ __volatile__("l.nop 42"); + + if ((entry = search_exception_tables(regs->pc)) != NULL) { + /* Adjust the instruction pointer in the stackframe */ + regs->pc = entry->fixup; + return; + } + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + + if ((unsigned long)(address) < PAGE_SIZE) + printk(KERN_ALERT + "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT "Unable to handle kernel access"); + printk(" at virtual address 0x%08lx\n", address); + + die("Oops", regs, write_acc); + + do_exit(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. 
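
Editor's note: the no_context path above recovers from faults at known instructions via the exception table, a lookup of (faulting pc, fixup pc) pairs. A toy model of that lookup (the addresses are hypothetical):

    #include <stdio.h>

    struct extable_entry {
        unsigned long insn;     /* address that may fault */
        unsigned long fixup;    /* where to resume instead */
    };

    /* Toy stand-in for search_exception_tables(). */
    static const struct extable_entry *search(const struct extable_entry *t,
                                              int n, unsigned long pc)
    {
        for (int i = 0; i < n; i++)
            if (t[i].insn == pc)
                return &t[i];
        return NULL;
    }

    int main(void)
    {
        const struct extable_entry table[] = {
            { 0xc0001000, 0xc0009900 },     /* hypothetical addresses */
        };
        unsigned long pc = 0xc0001000;
        const struct extable_entry *e = search(table, 1, pc);

        if (e)
            printf("redirect pc 0x%lx -> fixup 0x%lx\n", pc, e->fixup);
        else
            printf("no fixup: genuine oops\n");
        return 0;
    }
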
+ */ + +out_of_memory: + __asm__ __volatile__("l.nop 42"); + __asm__ __volatile__("l.nop 1"); + + up_read(&mm->mmap_sem); + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; + +do_sigbus: + up_read(&mm->mmap_sem); + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; + return; + +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Use current_pgd instead of tsk->active_mm->pgd + * since the latter might be unavailable if this + * code is executed in a misfortunately run irq + * (like inside schedule() between switch_mm and + * switch_to...). + */ + + int offset = pgd_index(address); + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + +/* + phx_warn("do_page_fault(): vmalloc_fault will not work, " + "since current_pgd assign a proper value somewhere\n" + "anyhow we don't need this at the moment\n"); + + phx_mmu("vmalloc_fault"); +*/ + pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset; + pgd_k = init_mm.pgd + offset; + + /* Since we're two-level, we don't need to do both + * set_pgd and set_pmd (they do the same thing). If + * we go three-level at some point, do the right thing + * with pgd_present and set_pgd here. + * + * Also, since the vmalloc area is global, we don't + * need to copy individual PTE's, it is enough to + * copy the pgd pointer into the pte page of the + * root task. If that is there, we'll find our pte if + * it exists. + */ + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + + if (!pmd_present(*pmd_k)) + goto bad_area_nosemaphore; + + set_pmd(pmd, *pmd_k); + + /* Make sure the actual PTE exists as well to + * catch kernel vmalloc-area accesses to non-mapped + * addresses. If we don't do this, this will just + * silently loop forever. + */ + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + + return; + } +} diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c new file mode 100644 index 000000000..6972d5d6f --- /dev/null +++ b/arch/openrisc/mm/init.c @@ -0,0 +1,235 @@ +/* + * OpenRISC idle.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/swap.h> +#include <linux/smp.h> +#include <linux/bootmem.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/blkdev.h> /* for initrd_* */ +#include <linux/pagemap.h> +#include <linux/memblock.h> + +#include <asm/segment.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/dma.h> +#include <asm/io.h> +#include <asm/tlb.h> +#include <asm/mmu_context.h> +#include <asm/kmap_types.h> +#include <asm/fixmap.h> +#include <asm/tlbflush.h> +#include <asm/sections.h> + +int mem_init_done; + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +static void __init zone_sizes_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES]; + + /* Clear the zone sizes */ + memset(zones_size, 0, sizeof(zones_size)); + + /* + * We use only ZONE_NORMAL + */ + zones_size[ZONE_NORMAL] = max_low_pfn; + + free_area_init(zones_size); +} + +extern const char _s_kernel_ro[], _e_kernel_ro[]; + +/* + * Map all physical memory into kernel's address space. + * + * This is explicitly coded for two-level page tables, so if you need + * something else then this needs to change. + */ +static void __init map_ram(void) +{ + unsigned long v, p, e; + pgprot_t prot; + pgd_t *pge; + pud_t *pue; + pmd_t *pme; + pte_t *pte; + /* These mark extents of read-only kernel pages... + * ...from vmlinux.lds.S + */ + struct memblock_region *region; + + v = PAGE_OFFSET; + + for_each_memblock(memory, region) { + p = (u32) region->base & PAGE_MASK; + e = p + (u32) region->size; + + v = (u32) __va(p); + pge = pgd_offset_k(v); + + while (p < e) { + int j; + pue = pud_offset(pge, v); + pme = pmd_offset(pue, v); + + if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) { + panic("%s: OR1K kernel hardcoded for " + "two-level page tables", + __func__); + } + + /* Alloc one page for holding PTE's... */ + pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); + set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte))); + + /* Fill the newly allocated page with PTE'S */ + for (j = 0; p < e && j < PTRS_PER_PTE; + v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) { + if (v >= (u32) _e_kernel_ro || + v < (u32) _s_kernel_ro) + prot = PAGE_KERNEL; + else + prot = PAGE_KERNEL_RO; + + set_pte(pte, mk_pte_phys(p, prot)); + } + + pge++; + } + + printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, + region->base, region->base + region->size); + } +} + +void __init paging_init(void) +{ + extern void tlb_init(void); + + unsigned long end; + int i; + + printk(KERN_INFO "Setting up paging and PTEs.\n"); + + /* clear out the init_mm.pgd that will contain the kernel's mappings */ + + for (i = 0; i < PTRS_PER_PGD; i++) + swapper_pg_dir[i] = __pgd(0); + + /* make sure the current pgd table points to something sane + * (even if it is most probably not used until the next + * switch_mm) + */ + current_pgd[smp_processor_id()] = init_mm.pgd; + + end = (unsigned long)__va(max_low_pfn * PAGE_SIZE); + + map_ram(); + + zone_sizes_init(); + + /* self modifying code ;) */ + /* Since the old TLB miss handler has been running up until now, + * the kernel pages are still all RW, so we can still modify the + * text directly... after this change and a TLB flush, the kernel + * pages will become RO. 
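
Editor's note: paging_init(), continued just below, patches the DTLB/ITLB miss vectors by storing a word encoding (handler - vector) >> 2, i.e. a jump displacement counted in 4-byte instructions. A sketch of that arithmetic with hypothetical addresses:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical kernel-virtual addresses, for illustration only. */
        unsigned long vector = 0xc0000900;   /* DTLB miss vector */
        unsigned long handler = 0xc0002400;  /* dtlb_miss_handler */

        /* Word written at the vector, as in the code below: a jump
         * displacement in units of 4-byte instructions. */
        unsigned long disp = (handler - vector) >> 2;

        printf("jump displacement: 0x%lx (%lu instructions)\n", disp, disp);
        return 0;
    }
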
+ */ + { + extern unsigned long dtlb_miss_handler; + extern unsigned long itlb_miss_handler; + + unsigned long *dtlb_vector = __va(0x900); + unsigned long *itlb_vector = __va(0xa00); + + printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler); + *itlb_vector = ((unsigned long)&itlb_miss_handler - + (unsigned long)itlb_vector) >> 2; + + /* Soft ordering constraint to ensure that dtlb_vector is + * the last thing updated + */ + barrier(); + + printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler); + *dtlb_vector = ((unsigned long)&dtlb_miss_handler - + (unsigned long)dtlb_vector) >> 2; + + } + + /* Soft ordering constraint to ensure that cache invalidation and + * TLB flush really happen _after_ code has been modified. + */ + barrier(); + + /* Invalidate instruction caches after code modification */ + mtspr(SPR_ICBIR, 0x900); + mtspr(SPR_ICBIR, 0xa00); + + /* New TLB miss handlers and kernel page tables are in now place. + * Make sure that page flags get updated for all pages in TLB by + * flushing the TLB and forcing all TLB entries to be recreated + * from their page table flags. + */ + flush_tlb_all(); +} + +/* References to section boundaries */ + +void __init mem_init(void) +{ + BUG_ON(!mem_map); + + max_mapnr = max_low_pfn; + high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); + + /* clear the zero-page */ + memset((void *)empty_zero_page, 0, PAGE_SIZE); + + /* this will put all low memory onto the freelists */ + free_all_bootmem(); + + mem_init_print_info(NULL); + + printk("mem_init_done ...........................................\n"); + mem_init_done = 1; + return; +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_reserved_area((void *)start, (void *)end, -1, "initrd"); +} +#endif + +void free_initmem(void) +{ + free_initmem_default(-1); +} diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c new file mode 100644 index 000000000..2175e4bfd --- /dev/null +++ b/arch/openrisc/mm/ioremap.c @@ -0,0 +1,135 @@ +/* + * OpenRISC ioremap.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/vmalloc.h> +#include <linux/io.h> +#include <asm/pgalloc.h> +#include <asm/kmap_types.h> +#include <asm/fixmap.h> +#include <asm/bug.h> +#include <asm/pgtable.h> +#include <linux/sched.h> +#include <asm/tlbflush.h> + +extern int mem_init_done; + +static unsigned int fixmaps_used __initdata; + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. 
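
Editor's note: __ioremap(), which follows, splits an arbitrary physical address into a page-aligned base plus an in-page offset and maps whole pages. The same arithmetic as a sketch, assuming OpenRISC's 8 KiB page size:

    #include <stdio.h>

    #define PAGE_SIZE     8192ul
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long addr = 0x90000123;    /* unaligned device address */
        unsigned long size = 0x40;

        /* Same arithmetic as __ioremap() below. */
        unsigned long last_addr = addr + size - 1;
        unsigned long offset = addr & ~PAGE_MASK;
        unsigned long p = addr & PAGE_MASK;
        unsigned long map_size = PAGE_ALIGN(last_addr + 1) - p;

        printf("map 0x%lx bytes at 0x%lx, hand back base+0x%lx\n",
               map_size, p, offset);
        return 0;
    }
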
+ */ +void __iomem *__ref +__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot) +{ + phys_addr_t p; + unsigned long v; + unsigned long offset, last_addr; + struct vm_struct *area = NULL; + + /* Don't allow wraparound or zero size */ + last_addr = addr + size - 1; + if (!size || last_addr < addr) + return NULL; + + /* + * Mappings have to be page-aligned + */ + offset = addr & ~PAGE_MASK; + p = addr & PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - p; + + if (likely(mem_init_done)) { + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + v = (unsigned long)area->addr; + } else { + if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS) + return NULL; + v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used); + fixmaps_used += (size >> PAGE_SHIFT); + } + + if (ioremap_page_range(v, v + size, p, prot)) { + if (likely(mem_init_done)) + vfree(area->addr); + else + fixmaps_used -= (size >> PAGE_SHIFT); + return NULL; + } + + return (void __iomem *)(offset + (char *)v); +} +EXPORT_SYMBOL(__ioremap); + +void iounmap(void *addr) +{ + /* If the page is from the fixmap pool then we just clear out + * the fixmap mapping. + */ + if (unlikely((unsigned long)addr > FIXADDR_START)) { + /* This is a bit broken... we don't really know + * how big the area is so it's difficult to know + * how many fixed pages to invalidate... + * just flush tlb and hope for the best... + * consider this a FIXME + * + * Really we should be clearing out one or more page + * table entries for these virtual addresses so that + * future references cause a page fault... for now, we + * rely on two things: + * i) this code never gets called on known boards + * ii) invalid accesses to the freed areas aren't made + */ + flush_tlb_all(); + return; + } + + return vfree((void *)(PAGE_MASK & (unsigned long)addr)); +} +EXPORT_SYMBOL(iounmap); + +/** + * OK, this one's a bit tricky... ioremap can get called before memory is + * initialized (early serial console does this) and will want to alloc a page + * for its mapping. No userspace pages will ever get allocated before memory + * is initialized so this applies only to kernel pages. In the event that + * this is called before memory is initialized we allocate the page using + * the memblock infrastructure. + */ + +pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + + if (likely(mem_init_done)) { + pte = (pte_t *) __get_free_page(GFP_KERNEL); + } else { + pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); + } + + if (pte) + clear_page(pte); + return pte; +} diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c new file mode 100644 index 000000000..6c253a2e8 --- /dev/null +++ b/arch/openrisc/mm/tlb.c @@ -0,0 +1,192 @@ +/* + * OpenRISC tlb.c + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * Modifications for the OpenRISC architecture: + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
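
Editor's note: the DTLB_OFFSET/ITLB_OFFSET macros defined just below index a TLB set by page number modulo the number of sets. A sketch with an assumed 64-set TLB (real hardware reports the count in xMMUCFGR):

    #include <stdio.h>

    #define PAGE_SHIFT    13    /* 8 KiB pages */
    #define NUM_TLB_SETS  64    /* assumed; read from xMMUCFGR on real HW */

    #define TLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_TLB_SETS - 1))

    int main(void)
    {
        unsigned long a = 0xc0004000, b = 0xc0204000;

        /* Pages whose page numbers differ by a multiple of the set
         * count land in the same set and evict each other. */
        printf("set(a)=%lu set(b)=%lu\n", TLB_OFFSET(a), TLB_OFFSET(b));
        return 0;
    }
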
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/segment.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/spr_defs.h>
+
+#define NO_CONTEXT -1
+
+#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
+			    SPR_DMMUCFGR_NTS_OFF))
+#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
+			    SPR_IMMUCFGR_NTS_OFF))
+#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
+#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
+/*
+ * Invalidate all TLB entries.
+ *
+ * This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
+ * The easiest way to accomplish this is to just zero out the xTLBMR register
+ * completely.
+ *
+ */
+
+void local_flush_tlb_all(void)
+{
+	int i;
+	unsigned long num_tlb_sets;
+
+	/* Determine number of sets for IMMU. */
+	/* FIXME: Assumption is I & D nsets equal. */
+	num_tlb_sets = NUM_ITLB_SETS;
+
+	for (i = 0; i < num_tlb_sets; i++) {
+		mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
+		mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
+	}
+}
+
+#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
+#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
+
+/*
+ * Invalidate a single page.  This is what the xTLBEIR register is for.
+ *
+ * There's no point in checking the vma for PAGE_EXEC to determine whether it's
+ * the data or instruction TLB that should be flushed... that would take more
+ * than the few instructions that the following compiles down to!
+ *
+ * The case where we don't have the xTLBEIR register really only works for
+ * MMU's with a single way and is hard-coded that way.
+ */
+
+#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
+#define flush_dtlb_page_no_eir(addr) \
+	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0);
+
+#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
+#define flush_itlb_page_no_eir(addr) \
+	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (have_dtlbeir)
+		flush_dtlb_page_eir(addr);
+	else
+		flush_dtlb_page_no_eir(addr);
+
+	if (have_itlbeir)
+		flush_itlb_page_eir(addr);
+	else
+		flush_itlb_page_no_eir(addr);
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	bool dtlbeir;
+	bool itlbeir;
+
+	dtlbeir = have_dtlbeir;
+	itlbeir = have_itlbeir;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		if (dtlbeir)
+			flush_dtlb_page_eir(addr);
+		else
+			flush_dtlb_page_no_eir(addr);
+
+		if (itlbeir)
+			flush_itlb_page_eir(addr);
+		else
+			flush_itlb_page_no_eir(addr);
+	}
+}
+
+/*
+ * Invalidate the selected mm context only.
+ *
+ * FIXME: Due to some bug here, we're flushing everything for now.
+ * This should be changed to loop over the mm and call flush_tlb_range.
+ */
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+
+	/* Was seeing bugs with the mm struct passed to us. Scrapped most of
+	   this function. */
+	/* Several architectures do this */
+	local_flush_tlb_all();
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *next_tsk)
+{
+	/* remember the pgd for the fault handlers
+	 * this is similar to the pgd register in some other CPU's.
+	 * we need our own copy of it because current and active_mm
+	 * might be invalid at points where we still need to dereference
+	 * the pgd.
+	 */
+	current_pgd[smp_processor_id()] = next->pgd;
+
+	/* We don't have context support implemented, so flush all
+	 * entries belonging to the previous map
+	 */
+
+	if (prev != next)
+		local_flush_tlb_mm(prev);
+
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	mm->context = NO_CONTEXT;
+	return 0;
+}
+
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. this is only called when the last user of the mm
+ * drops it.
+ */
+
+void destroy_context(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init tlb_init(void)
+{
+	/* Do nothing... */
+	/* invalidate the entire TLB */
+	/* flush_tlb_all(); */
+}
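
Editor's note: since the port implements no MMU contexts, switch_mm() above pays for every real mm change with a full TLB flush. A toy model of that policy (all names invented for illustration):

    #include <stdio.h>

    struct mm { const char *name; };

    static struct mm *active;
    static unsigned flushes;

    /* Mirrors the policy in switch_mm(): no contexts, so changing
     * mm costs a full TLB flush. */
    static void toy_switch_mm(struct mm *prev, struct mm *next)
    {
        if (prev != next)
            flushes++;      /* stands in for local_flush_tlb_mm(prev) */
        active = next;
    }

    int main(void)
    {
        struct mm a = { "a" }, b = { "b" };

        toy_switch_mm(&a, &b);
        toy_switch_mm(&b, &b);  /* same mm: no flush */
        toy_switch_mm(&b, &a);
        printf("flushes: %u (active: %s)\n", flushes, active->name);
        return 0;
    }
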