author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
commit | 76cb841cb886eef6b3bee341a2266c76578724ad (patch) |
tree | f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/microblaze |
parent | Initial commit. (diff) |
download | linux-76cb841cb886eef6b3bee341a2266c76578724ad.tar.xz, linux-76cb841cb886eef6b3bee341a2266c76578724ad.zip |
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/microblaze')
143 files changed, 19162 insertions, 0 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig new file mode 100644 index 000000000..ace5c5bf1 --- /dev/null +++ b/arch/microblaze/Kconfig @@ -0,0 +1,289 @@ +config MICROBLAZE + def_bool y + select ARCH_NO_SWAP + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_COHERENT_DMA_MMAP if !MMU + select ARCH_WANT_IPC_PARSE_VERSION + select BUILDTIME_EXTABLE_SORT + select TIMER_OF + select CLONE_BACKWARDS3 + select COMMON_CLK + select DMA_NONCOHERENT_OPS + select DMA_NONCOHERENT_MMAP + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_CPU_DEVICES + select GENERIC_IDLE_POLL_SETUP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK + select HAVE_ARCH_HASH + select HAVE_ARCH_KGDB + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select NO_BOOTMEM + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_OPROFILE + select IRQ_DOMAIN + select XILINX_INTC + select MODULES_USE_ELF_RELA + select OF + select OF_EARLY_FLATTREE + select TRACING_SUPPORT + select VIRT_TO_BUS + select CPU_NO_EFFICIENT_FFS + +# Endianness selection +choice + prompt "Endianness selection" + default CPU_LITTLE_ENDIAN + help + microblaze architectures can be configured for either little or + big endian formats. Be sure to select the appropriate mode. + +config CPU_BIG_ENDIAN + bool "Big endian" + +config CPU_LITTLE_ENDIAN + bool "Little endian" + +endchoice + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config ZONE_DMA + def_bool y + +config RWSEM_XCHGADD_ALGORITHM + bool + +config ARCH_HAS_ILOG2_U32 + def_bool n + +config ARCH_HAS_ILOG2_U64 + def_bool n + +config GENERIC_HWEIGHT + def_bool y + +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_CSUM + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +source "arch/microblaze/Kconfig.platform" + +menu "Processor type and features" + +source "kernel/Kconfig.hz" + +config MMU + bool "MMU support" + default n + +comment "Boot options" + +config CMDLINE_BOOL + bool "Default bootloader kernel arguments" + +config CMDLINE + string "Default kernel command string" + depends on CMDLINE_BOOL + default "console=ttyUL0,115200" + help + On some architectures there is currently no way for the boot loader + to pass arguments to the kernel. For these architectures, you should + supply some command-line options at build time by entering them + here. + +config CMDLINE_FORCE + bool "Force default kernel command string" + depends on CMDLINE_BOOL + default n + help + Set this to have arguments from the default kernel command string + override those passed by the boot loader. + +config SECCOMP + bool "Enable seccomp to safely compute untrusted bytecode" + depends on PROC_FS + default y + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via /proc/<pid>/seccomp, it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. 
Only embedded should say N here. + +endmenu + +menu "Kernel features" + +config NR_CPUS + int + default "1" + +config ADVANCED_OPTIONS + bool "Prompt for advanced kernel configuration options" + help + This option will enable prompting for a variety of advanced kernel + configuration options. These options can cause the kernel to not + work if they are set incorrectly, but can be used to optimize certain + aspects of kernel memory management. + + Unless you know what you are doing, say N here. + +comment "Default settings for advanced configuration options are used" + depends on !ADVANCED_OPTIONS + +config XILINX_UNCACHED_SHADOW + bool "Are you using uncached shadow for RAM ?" + depends on ADVANCED_OPTIONS && !MMU + default n + help + This is needed to be able to allocate uncachable memory regions. + The feature requires the design to define the RAM memory controller + window to be twice as large as the actual physical memory. + +config HIGHMEM + bool "High memory support" + depends on MMU + help + The address space of Microblaze processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address + space as well as some memory mapped IO. That means that, if you + have a large amount of physical memory and/or IO, not all of the + memory can be "permanently mapped" by the kernel. The physical + memory that is not permanently mapped is called "high memory". + + If unsure, say n. + +config LOWMEM_SIZE_BOOL + bool "Set maximum low memory" + depends on ADVANCED_OPTIONS && MMU + help + This option allows you to set the maximum amount of memory which + will be used as "low memory", that is, memory which the kernel can + access directly, without having to set up a kernel virtual mapping. + This can be useful in optimizing the layout of kernel virtual + memory. + + Say N here unless you know what you are doing. + +config LOWMEM_SIZE + hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL + default "0x30000000" + +config MANUAL_RESET_VECTOR + hex "Microblaze reset vector address setup" + default "0x0" + help + Set this option to have the kernel override the CPU Reset vector. + If zero, no change will be made to the MicroBlaze reset vector at + address 0x0. + If non-zero, a jump instruction to this address, will be written + to the reset vector at address 0x0. + If you are unsure, set it to default value 0x0. + +config KERNEL_START_BOOL + bool "Set custom kernel base address" + depends on ADVANCED_OPTIONS + help + This option allows you to set the kernel virtual address at which + the kernel will map low memory (the kernel image will be linked at + this address). This can be useful in optimizing the virtual memory + layout of the system. + + Say N here unless you know what you are doing. + +config KERNEL_START + hex "Virtual address of kernel base" if KERNEL_START_BOOL + default "0xc0000000" if MMU + default KERNEL_BASE_ADDR if !MMU + +config TASK_SIZE_BOOL + bool "Set custom user task size" + depends on ADVANCED_OPTIONS && MMU + help + This option allows you to set the amount of virtual address space + allocated to user tasks. This can be useful in optimizing the + virtual memory layout of the system. + + Say N here unless you know what you are doing. + +config TASK_SIZE + hex "Size of user task space" if TASK_SIZE_BOOL + default "0x80000000" + +choice + prompt "Page size" + default MICROBLAZE_4K_PAGES + depends on ADVANCED_OPTIONS && !MMU + help + Select the kernel logical page size. 
Increasing the page size + will reduce software overhead at each page boundary, allow + hardware prefetch mechanisms to be more effective, and allow + larger dma transfers increasing IO efficiency and reducing + overhead. However the utilization of memory will increase. + For example, each cached file will using a multiple of the + page size to hold its contents and the difference between the + end of file and the end of page is wasted. + + If unsure, choose 4K_PAGES. + +config MICROBLAZE_4K_PAGES + bool "4k page size" + +config MICROBLAZE_16K_PAGES + bool "16k page size" + +config MICROBLAZE_64K_PAGES + bool "64k page size" + +endchoice + +endmenu + +menu "Bus Options" + +config PCI + bool "PCI support" + +config PCI_DOMAINS + def_bool PCI + +config PCI_DOMAINS_GENERIC + def_bool PCI_DOMAINS + +config PCI_SYSCALL + def_bool PCI + +config PCI_XILINX + bool "Xilinx PCI host bridge support" + depends on PCI + +source "drivers/pci/Kconfig" + +endmenu diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug new file mode 100644 index 000000000..dc2e3c45e --- /dev/null +++ b/arch/microblaze/Kconfig.debug @@ -0,0 +1,5 @@ +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. + +config TRACE_IRQFLAGS_SUPPORT + def_bool y diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform new file mode 100644 index 000000000..f7f1739c1 --- /dev/null +++ b/arch/microblaze/Kconfig.platform @@ -0,0 +1,70 @@ +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# +# Platform selection Kconfig menu for MicroBlaze targets +# + +menu "Platform options" + +config OPT_LIB_FUNCTION + bool "Optimalized lib function" + default y + help + Allows turn on optimalized library function (memcpy and memmove). + They are optimized by using word alignment. This will work + fine if both source and destination are aligned on the same + boundary. However, if they are aligned on different boundaries + shifts will be necessary. This might result in bad performance + on MicroBlaze systems without a barrel shifter. + +config OPT_LIB_ASM + bool "Optimalized lib function ASM" + depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1) + depends on CPU_BIG_ENDIAN + default n + help + Allows turn on optimalized library function (memcpy and memmove). + Function are written in asm code. 
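The OPT_LIB_FUNCTION help above describes a standard trick: when source and destination have the same word alignment, memcpy()/memmove() can move 32-bit words instead of single bytes; when the alignments differ, every output word must be assembled from two input words with shifts, which is only cheap on cores with a barrel shifter. A minimal C sketch of the co-aligned fast path (an illustration of the idea, not the kernel's actual implementation):

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: copy whole words when src and dst are co-aligned,
 * falling back to byte copies at the edges (and entirely when the
 * alignments differ, which is where the shifting variant would go). */
static void *word_copy(void *dst, const void *src, size_t n)
{
	char *d = dst;
	const char *s = src;

	/* Fast path only when both pointers share the same word offset. */
	if (((uintptr_t)d & 3) == ((uintptr_t)s & 3)) {
		while (((uintptr_t)d & 3) && n) {	/* align head bytes */
			*d++ = *s++;
			n--;
		}
		for (; n >= 4; n -= 4, d += 4, s += 4)	/* word at a time */
			*(uint32_t *)d = *(const uint32_t *)s;
	}
	while (n--)	/* tail bytes, or the whole misaligned copy */
		*d++ = *s++;
	return dst;
}
```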
+ +# Definitions for MICROBLAZE0 +comment "Definitions for MICROBLAZE0" + +config KERNEL_BASE_ADDR + hex "Physical address where Linux Kernel is" + default "0x90000000" + help + BASE Address for kernel + +config XILINX_MICROBLAZE0_FAMILY + string "Targeted FPGA family" + default "virtex5" + +config XILINX_MICROBLAZE0_USE_MSR_INSTR + int "USE_MSR_INSTR range (0:1)" + default 0 + +config XILINX_MICROBLAZE0_USE_PCMP_INSTR + int "USE_PCMP_INSTR range (0:1)" + default 0 + +config XILINX_MICROBLAZE0_USE_BARREL + int "USE_BARREL range (0:1)" + default 0 + +config XILINX_MICROBLAZE0_USE_DIV + int "USE_DIV range (0:1)" + default 0 + +config XILINX_MICROBLAZE0_USE_HW_MUL + int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)" + default 0 + +config XILINX_MICROBLAZE0_USE_FPU + int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)" + default 0 + +config XILINX_MICROBLAZE0_HW_VER + string "Core version number" + default 7.10.d + +endmenu diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile new file mode 100644 index 000000000..548bac6c6 --- /dev/null +++ b/arch/microblaze/Makefile @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: GPL-2.0 +KBUILD_DEFCONFIG := mmu_defconfig + +ifeq ($(CONFIG_MMU),y) +UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\" +else +UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\" +endif + +# What CPU vesion are we building for, and crack it open +# as major.minor.rev +CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER)) +CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1) +CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2) +CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3) + +export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV + +# Use cpu-related CONFIG_ vars to set compile options. +# The various CONFIG_XILINX cpu features options are integers 0/1/2... +# rather than bools y/n + +# Work out HW multipler support. This is tricky. +# 1. Spartan2 has no HW multipliers. +# 2. MicroBlaze v3.x always uses them, except in Spartan 2 +# 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings +ifeq (,$(findstring spartan2,$(CONFIG_XILINX_MICROBLAZE0_FAMILY))) + ifeq ($(CPU_MAJOR),3) + CPUFLAGS-1 += -mno-xl-soft-mul + else + # USE_HW_MUL can be 0, 1, or 2, defining a hierarchy of HW Mul support. + CPUFLAGS-$(subst 1,,$(CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL)) += -mxl-multiply-high + CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL) += -mno-xl-soft-mul + endif +endif +CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_DIV) += -mno-xl-soft-div +CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_BARREL) += -mxl-barrel-shift +CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare + +ifdef CONFIG_CPU_BIG_ENDIAN +KBUILD_CFLAGS += -mbig-endian +KBUILD_AFLAGS += -mbig-endian +KBUILD_LDFLAGS += -EB +else +KBUILD_CFLAGS += -mlittle-endian +KBUILD_AFLAGS += -mlittle-endian +KBUILD_LDFLAGS += -EL +endif + +CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) + +# r31 holds current when in kernel mode +KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-y) $(CPUFLAGS-1) $(CPUFLAGS-2) + +head-y := arch/microblaze/kernel/head.o +libs-y += arch/microblaze/lib/ +core-y += arch/microblaze/kernel/ +core-y += arch/microblaze/mm/ +core-$(CONFIG_PCI) += arch/microblaze/pci/ + +drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ + +boot := arch/microblaze/boot + +# Are we making a simpleImage.<boardname> target? 
If so, crack out the boardname +DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS))) + +ifneq ($(DTB),) + core-y += $(boot)/dts/ +endif + +# defines filename extension depending memory management type +ifeq ($(CONFIG_MMU),) +MMU := -nommu +endif + +export MMU DTB + +all: linux.bin + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + +linux.bin linux.bin.gz linux.bin.ub: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' + +simpleImage.%: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip) + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' + +define archhelp + echo '* linux.bin - Create raw binary' + echo ' linux.bin.gz - Create compressed raw binary' + echo ' linux.bin.ub - Create U-Boot wrapped raw binary' + echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in' + echo ' simpleImage.<dt> : raw image' + echo ' simpleImage.<dt>.ub : raw image with U-Boot header' + echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)' + echo ' simpleImage.<dt>.strip : stripped ELF' + echo ' Targets with <dt> embed a device tree blob inside the image' + echo ' These targets support board with firmware that does not' + echo ' support passing a device tree directly. Replace <dt> with the' + echo ' name of a dts file from the arch/microblaze/boot/dts/ directory' + echo ' (minus the .dts extension).' +endef + +MRPROPER_FILES += $(boot)/simpleImage.* diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore new file mode 100644 index 000000000..679502d64 --- /dev/null +++ b/arch/microblaze/boot/.gitignore @@ -0,0 +1,2 @@ +linux.bin* +simpleImage.* diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile new file mode 100644 index 000000000..cff570a71 --- /dev/null +++ b/arch/microblaze/boot/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# arch/microblaze/boot/Makefile +# + +targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.* + +OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary + +$(obj)/linux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/linux.bin.ub: $(obj)/linux.bin FORCE + $(call if_changed,uimage) + +$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE + $(call if_changed,gzip) + +quiet_cmd_strip = STRIP $< $@$2 + cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ + -K _fdt_start $< -o $@$2 + +UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) + +$(obj)/simpleImage.$(DTB): vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE + $(call if_changed,uimage) + +$(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE + $(call if_changed,shipped) + +$(obj)/simpleImage.$(DTB).strip: vmlinux FORCE + $(call if_changed,strip) diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile new file mode 100644 index 000000000..1f77913d4 --- /dev/null +++ b/arch/microblaze/boot/dts/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# + +obj-y += linked_dtb.o + +# Ensure system.dtb exists +$(obj)/linked_dtb.o: $(obj)/system.dtb + +# Generate system.dtb from $(DTB).dtb +ifneq ($(DTB),system) +$(obj)/system.dtb: $(obj)/$(DTB).dtb + $(call if_changed,cp) +endif + +quiet_cmd_cp = CP $< $@$2 + cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) + +# Rule to build device tree blobs +DTC_FLAGS := -p 1024 diff --git a/arch/microblaze/boot/dts/linked_dtb.S b/arch/microblaze/boot/dts/linked_dtb.S new file mode 100644 index 
000000000..23345af37 --- /dev/null +++ b/arch/microblaze/boot/dts/linked_dtb.S @@ -0,0 +1,2 @@ +.section __fdt_blob,"a" +.incbin "arch/microblaze/boot/dts/system.dtb" diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts new file mode 100644 index 000000000..8a420c670 --- /dev/null +++ b/arch/microblaze/boot/dts/system.dts @@ -0,0 +1,366 @@ +/* + * Device Tree Generator version: 1.1 + * + * (C) Copyright 2007-2008 Xilinx, Inc. + * (C) Copyright 2007-2009 Michal Simek + * + * Michal SIMEK <monstr@monstr.eu> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + * CAUTION: This file is automatically generated by libgen. + * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6 + * + * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101 + */ + +/dts-v1/; +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,microblaze"; + hard-reset-gpios = <&LEDs_8Bit 2 1>; + model = "testing"; + DDR2_SDRAM: memory@90000000 { + device_type = "memory"; + reg = < 0x90000000 0x10000000 >; + } ; + aliases { + ethernet0 = &Hard_Ethernet_MAC; + serial0 = &RS232_Uart_1; + } ; + chosen { + bootargs = "console=ttyUL0,115200 highres=on"; + stdout-path = "/plb@0/serial@84000000"; + } ; + cpus { + #address-cells = <1>; + #cpus = <0x1>; + #size-cells = <0>; + microblaze_0: cpu@0 { + clock-frequency = <125000000>; + compatible = "xlnx,microblaze-7.10.d"; + d-cache-baseaddr = <0x90000000>; + d-cache-highaddr = <0x9fffffff>; + d-cache-line-size = <0x10>; + d-cache-size = <0x2000>; + device_type = "cpu"; + i-cache-baseaddr = <0x90000000>; + i-cache-highaddr = <0x9fffffff>; + i-cache-line-size = <0x10>; + i-cache-size = <0x2000>; + model = "microblaze,7.10.d"; + reg = <0>; + timebase-frequency = <125000000>; + xlnx,addr-tag-bits = <0xf>; + xlnx,allow-dcache-wr = <0x1>; + xlnx,allow-icache-wr = <0x1>; + xlnx,area-optimized = <0x0>; + xlnx,cache-byte-size = <0x2000>; + xlnx,d-lmb = <0x1>; + xlnx,d-opb = <0x0>; + xlnx,d-plb = <0x1>; + xlnx,data-size = <0x20>; + xlnx,dcache-addr-tag = <0xf>; + xlnx,dcache-always-used = <0x1>; + xlnx,dcache-byte-size = <0x2000>; + xlnx,dcache-line-len = <0x4>; + xlnx,dcache-use-fsl = <0x1>; + xlnx,debug-enabled = <0x1>; + xlnx,div-zero-exception = <0x1>; + xlnx,dopb-bus-exception = <0x0>; + xlnx,dynamic-bus-sizing = <0x1>; + xlnx,edge-is-positive = <0x1>; + xlnx,family = "virtex5"; + xlnx,endianness = <0x1>; + xlnx,fpu-exception = <0x1>; + xlnx,fsl-data-size = <0x20>; + xlnx,fsl-exception = <0x0>; + xlnx,fsl-links = <0x0>; + xlnx,i-lmb = <0x1>; + xlnx,i-opb = <0x0>; + xlnx,i-plb = <0x1>; + xlnx,icache-always-used = <0x1>; + xlnx,icache-line-len = <0x4>; + xlnx,icache-use-fsl = <0x1>; + xlnx,ill-opcode-exception = <0x1>; + xlnx,instance = "microblaze_0"; + xlnx,interconnect = <0x1>; + xlnx,interrupt-is-edge = <0x0>; + xlnx,iopb-bus-exception = <0x0>; + xlnx,mmu-dtlb-size = <0x4>; 
+ xlnx,mmu-itlb-size = <0x2>; + xlnx,mmu-tlb-access = <0x3>; + xlnx,mmu-zones = <0x10>; + xlnx,number-of-pc-brk = <0x1>; + xlnx,number-of-rd-addr-brk = <0x0>; + xlnx,number-of-wr-addr-brk = <0x0>; + xlnx,opcode-0x0-illegal = <0x1>; + xlnx,pvr = <0x2>; + xlnx,pvr-user1 = <0x0>; + xlnx,pvr-user2 = <0x0>; + xlnx,reset-msr = <0x0>; + xlnx,sco = <0x0>; + xlnx,unaligned-exceptions = <0x1>; + xlnx,use-barrel = <0x1>; + xlnx,use-dcache = <0x1>; + xlnx,use-div = <0x1>; + xlnx,use-ext-brk = <0x1>; + xlnx,use-ext-nm-brk = <0x1>; + xlnx,use-extended-fsl-instr = <0x0>; + xlnx,use-fpu = <0x2>; + xlnx,use-hw-mul = <0x2>; + xlnx,use-icache = <0x1>; + xlnx,use-interrupt = <0x1>; + xlnx,use-mmu = <0x3>; + xlnx,use-msr-instr = <0x1>; + xlnx,use-pcmp-instr = <0x1>; + } ; + } ; + mb_plb: plb@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus"; + ranges ; + FLASH: flash@a0000000 { + bank-width = <2>; + compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash"; + reg = < 0xa0000000 0x2000000 >; + xlnx,family = "virtex5"; + xlnx,include-datawidth-matching-0 = <0x1>; + xlnx,include-datawidth-matching-1 = <0x0>; + xlnx,include-datawidth-matching-2 = <0x0>; + xlnx,include-datawidth-matching-3 = <0x0>; + xlnx,include-negedge-ioregs = <0x0>; + xlnx,include-plb-ipif = <0x1>; + xlnx,include-wrbuf = <0x1>; + xlnx,max-mem-width = <0x10>; + xlnx,mch-native-dwidth = <0x20>; + xlnx,mch-plb-clk-period-ps = <0x1f40>; + xlnx,mch-splb-awidth = <0x20>; + xlnx,mch0-accessbuf-depth = <0x10>; + xlnx,mch0-protocol = <0x0>; + xlnx,mch0-rddatabuf-depth = <0x10>; + xlnx,mch1-accessbuf-depth = <0x10>; + xlnx,mch1-protocol = <0x0>; + xlnx,mch1-rddatabuf-depth = <0x10>; + xlnx,mch2-accessbuf-depth = <0x10>; + xlnx,mch2-protocol = <0x0>; + xlnx,mch2-rddatabuf-depth = <0x10>; + xlnx,mch3-accessbuf-depth = <0x10>; + xlnx,mch3-protocol = <0x0>; + xlnx,mch3-rddatabuf-depth = <0x10>; + xlnx,mem0-width = <0x10>; + xlnx,mem1-width = <0x20>; + xlnx,mem2-width = <0x20>; + xlnx,mem3-width = <0x20>; + xlnx,num-banks-mem = <0x1>; + xlnx,num-channels = <0x0>; + xlnx,priority-mode = <0x0>; + xlnx,synch-mem-0 = <0x0>; + xlnx,synch-mem-1 = <0x0>; + xlnx,synch-mem-2 = <0x0>; + xlnx,synch-mem-3 = <0x0>; + xlnx,synch-pipedelay-0 = <0x2>; + xlnx,synch-pipedelay-1 = <0x2>; + xlnx,synch-pipedelay-2 = <0x2>; + xlnx,synch-pipedelay-3 = <0x2>; + xlnx,tavdv-ps-mem-0 = <0x1adb0>; + xlnx,tavdv-ps-mem-1 = <0x3a98>; + xlnx,tavdv-ps-mem-2 = <0x3a98>; + xlnx,tavdv-ps-mem-3 = <0x3a98>; + xlnx,tcedv-ps-mem-0 = <0x1adb0>; + xlnx,tcedv-ps-mem-1 = <0x3a98>; + xlnx,tcedv-ps-mem-2 = <0x3a98>; + xlnx,tcedv-ps-mem-3 = <0x3a98>; + xlnx,thzce-ps-mem-0 = <0x88b8>; + xlnx,thzce-ps-mem-1 = <0x1b58>; + xlnx,thzce-ps-mem-2 = <0x1b58>; + xlnx,thzce-ps-mem-3 = <0x1b58>; + xlnx,thzoe-ps-mem-0 = <0x1b58>; + xlnx,thzoe-ps-mem-1 = <0x1b58>; + xlnx,thzoe-ps-mem-2 = <0x1b58>; + xlnx,thzoe-ps-mem-3 = <0x1b58>; + xlnx,tlzwe-ps-mem-0 = <0x88b8>; + xlnx,tlzwe-ps-mem-1 = <0x0>; + xlnx,tlzwe-ps-mem-2 = <0x0>; + xlnx,tlzwe-ps-mem-3 = <0x0>; + xlnx,twc-ps-mem-0 = <0x2af8>; + xlnx,twc-ps-mem-1 = <0x3a98>; + xlnx,twc-ps-mem-2 = <0x3a98>; + xlnx,twc-ps-mem-3 = <0x3a98>; + xlnx,twp-ps-mem-0 = <0x11170>; + xlnx,twp-ps-mem-1 = <0x2ee0>; + xlnx,twp-ps-mem-2 = <0x2ee0>; + xlnx,twp-ps-mem-3 = <0x2ee0>; + xlnx,xcl0-linesize = <0x4>; + xlnx,xcl0-writexfer = <0x1>; + xlnx,xcl1-linesize = <0x4>; + xlnx,xcl1-writexfer = <0x1>; + xlnx,xcl2-linesize = <0x4>; + xlnx,xcl2-writexfer = <0x1>; + xlnx,xcl3-linesize = <0x4>; + xlnx,xcl3-writexfer = <0x1>; + } ; 
+ Hard_Ethernet_MAC: xps-ll-temac@81c00000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,compound"; + ranges ; + ethernet@81c00000 { + compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 5 2 >; + llink-connected = <&PIM3>; + local-mac-address = [ 00 0a 35 00 00 00 ]; + reg = < 0x81c00000 0x40 >; + xlnx,bus2core-clk-ratio = <0x1>; + xlnx,phy-type = <0x1>; + xlnx,phyaddr = <0x1>; + xlnx,rxcsum = <0x0>; + xlnx,rxfifo = <0x1000>; + xlnx,temac-type = <0x0>; + xlnx,txcsum = <0x0>; + xlnx,txfifo = <0x1000>; + } ; + } ; + IIC_EEPROM: i2c@81600000 { + compatible = "xlnx,xps-iic-2.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 6 2 >; + reg = < 0x81600000 0x10000 >; + xlnx,clk-freq = <0x7735940>; + xlnx,family = "virtex5"; + xlnx,gpo-width = <0x1>; + xlnx,iic-freq = <0x186a0>; + xlnx,scl-inertial-delay = <0x0>; + xlnx,sda-inertial-delay = <0x0>; + xlnx,ten-bit-adr = <0x0>; + } ; + LEDs_8Bit: gpio@81400000 { + compatible = "xlnx,xps-gpio-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 7 2 >; + reg = < 0x81400000 0x10000 >; + xlnx,all-inputs = <0x0>; + xlnx,all-inputs-2 = <0x0>; + xlnx,dout-default = <0x0>; + xlnx,dout-default-2 = <0x0>; + xlnx,family = "virtex5"; + xlnx,gpio-width = <0x8>; + xlnx,interrupt-present = <0x1>; + xlnx,is-bidir = <0x1>; + xlnx,is-bidir-2 = <0x1>; + xlnx,is-dual = <0x0>; + xlnx,tri-default = <0xffffffff>; + xlnx,tri-default-2 = <0xffffffff>; + #gpio-cells = <2>; + gpio-controller; + } ; + + gpio-leds { + compatible = "gpio-leds"; + + heartbeat { + label = "Heartbeat"; + gpios = <&LEDs_8Bit 4 1>; + linux,default-trigger = "heartbeat"; + }; + + yellow { + label = "Yellow"; + gpios = <&LEDs_8Bit 5 1>; + }; + + red { + label = "Red"; + gpios = <&LEDs_8Bit 6 1>; + }; + + green { + label = "Green"; + gpios = <&LEDs_8Bit 7 1>; + }; + } ; + RS232_Uart_1: serial@84000000 { + clock-frequency = <125000000>; + compatible = "xlnx,xps-uartlite-1.00.a"; + current-speed = <115200>; + device_type = "serial"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 8 0 >; + port-number = <0>; + reg = < 0x84000000 0x10000 >; + xlnx,baudrate = <0x1c200>; + xlnx,data-bits = <0x8>; + xlnx,family = "virtex5"; + xlnx,odd-parity = <0x0>; + xlnx,use-parity = <0x0>; + } ; + SysACE_CompactFlash: sysace@83600000 { + compatible = "xlnx,xps-sysace-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 4 2 >; + reg = < 0x83600000 0x10000 >; + xlnx,family = "virtex5"; + xlnx,mem-width = <0x10>; + } ; + debug_module: debug@84400000 { + compatible = "xlnx,mdm-1.00.d"; + reg = < 0x84400000 0x10000 >; + xlnx,family = "virtex5"; + xlnx,interconnect = <0x1>; + xlnx,jtag-chain = <0x2>; + xlnx,mb-dbg-ports = <0x1>; + xlnx,uart-width = <0x8>; + xlnx,use-uart = <0x1>; + xlnx,write-fsl-ports = <0x0>; + } ; + mpmc@90000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,mpmc-4.02.a"; + ranges ; + PIM3: sdma@84600180 { + compatible = "xlnx,ll-dma-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 2 2 1 2 >; + reg = < 0x84600180 0x80 >; + } ; + } ; + xps_intc_0: interrupt-controller@81800000 { + #interrupt-cells = <0x2>; + compatible = "xlnx,xps-intc-1.00.a"; + interrupt-controller ; + reg = < 0x81800000 0x10000 >; + xlnx,kind-of-intr = <0x100>; + xlnx,num-intr-inputs = <0x9>; + } ; + xps_timer_1: timer@83c00000 { + compatible = "xlnx,xps-timer-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 3 2 >; + reg = < 0x83c00000 0x10000 >; + xlnx,count-width = 
<0x20>; + xlnx,family = "virtex5"; + xlnx,gen0-assert = <0x1>; + xlnx,gen1-assert = <0x1>; + xlnx,one-timer-only = <0x0>; + xlnx,trig0-assert = <0x1>; + xlnx,trig1-assert = <0x1>; + } ; + } ; +} ; diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig new file mode 100644 index 000000000..92fd4e95b --- /dev/null +++ b/arch/microblaze/configs/mmu_defconfig @@ -0,0 +1,88 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_BASE_FULL is not set +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_EFI_PARTITION is not set +CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 +CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 +CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 +CONFIG_XILINX_MICROBLAZE0_USE_DIV=1 +CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2 +CONFIG_XILINX_MICROBLAZE0_USE_FPU=2 +CONFIG_HZ_100=y +CONFIG_MMU=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE_FORCE=y +CONFIG_HIGHMEM=y +CONFIG_PCI=y +CONFIG_PCI_XILINX=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_IPV6 is not set +CONFIG_BRIDGE=m +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_NETDEVICES=y +CONFIG_XILINX_EMACLITE=y +CONFIG_XILINX_LL_TEMAC=y +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_UARTLITE=y +CONFIG_SERIAL_UARTLITE_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_XILINX_HWICAP=y +CONFIG_I2C=y +CONFIG_I2C_XILINX=y +CONFIG_SPI=y +CONFIG_SPI_XILINX=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_XILINX=y +# CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_XILINX_WATCHDOG=y +CONFIG_FB=y +CONFIG_FB_XILINX=y +# CONFIG_USB_SUPPORT is not set +CONFIG_UIO=y +CONFIG_UIO_PDRV_GENIRQ=y +CONFIG_UIO_DMEM_GENIRQ=y +CONFIG_EXT2_FS=y +# CONFIG_DNOTIFY is not set +CONFIG_CRAMFS=y +CONFIG_ROMFS_FS=y +CONFIG_NFS_FS=y +CONFIG_CIFS=y +CONFIG_CIFS_STATS=y +CONFIG_CIFS_STATS2=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_SLAB=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_KGDB=y +CONFIG_KGDB_TESTS=y +CONFIG_KGDB_KDB=y +CONFIG_EARLY_PRINTK=y +CONFIG_KEYS=y +CONFIG_ENCRYPTED_KEYS=y diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig new file mode 100644 index 000000000..06d69a6e1 --- /dev/null +++ b/arch/microblaze/configs/nommu_defconfig @@ -0,0 +1,89 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_BASE_FULL is not set +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_EFI_PARTITION is not set +CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 +CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 +CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 +CONFIG_XILINX_MICROBLAZE0_USE_DIV=1 +CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2 +CONFIG_XILINX_MICROBLAZE0_USE_FPU=2 +CONFIG_HZ_100=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE_FORCE=y +CONFIG_PCI=y +CONFIG_PCI_XILINX=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_IPV6 is not set 
+CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_RAM=y +CONFIG_MTD_UCLINUX=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_NETDEVICES=y +CONFIG_XILINX_EMACLITE=y +CONFIG_XILINX_LL_TEMAC=y +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_UARTLITE=y +CONFIG_SERIAL_UARTLITE_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_XILINX_HWICAP=y +CONFIG_I2C=y +CONFIG_I2C_XILINX=y +CONFIG_SPI=y +CONFIG_SPI_XILINX=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_XILINX=y +# CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_XILINX_WATCHDOG=y +CONFIG_FB=y +CONFIG_FB_XILINX=y +# CONFIG_USB_SUPPORT is not set +CONFIG_EXT2_FS=y +# CONFIG_DNOTIFY is not set +CONFIG_CRAMFS=y +CONFIG_ROMFS_FS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NLS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_SLAB=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_EARLY_PRINTK=y +CONFIG_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_DES=y diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild new file mode 100644 index 000000000..569ba9e67 --- /dev/null +++ b/arch/microblaze/include/asm/Kbuild @@ -0,0 +1,33 @@ +generic-y += barrier.h +generic-y += bitops.h +generic-y += bug.h +generic-y += bugs.h +generic-y += compat.h +generic-y += device.h +generic-y += div64.h +generic-y += dma-mapping.h +generic-y += emergency-restart.h +generic-y += exec.h +generic-y += extable.h +generic-y += fb.h +generic-y += hardirq.h +generic-y += irq_regs.h +generic-y += irq_work.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kprobes.h +generic-y += linkage.h +generic-y += local.h +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h +generic-y += parport.h +generic-y += percpu.h +generic-y += preempt.h +generic-y += serial.h +generic-y += syscalls.h +generic-y += topology.h +generic-y += trace_clock.h +generic-y += vga.h +generic-y += word-at-a-time.h +generic-y += xor.h diff --git a/arch/microblaze/include/asm/asm-compat.h b/arch/microblaze/include/asm/asm-compat.h new file mode 100644 index 000000000..c05259ce2 --- /dev/null +++ b/arch/microblaze/include/asm/asm-compat.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_ASM_COMPAT_H +#define _ASM_MICROBLAZE_ASM_COMPAT_H + +#include <asm/types.h> + +#ifdef __ASSEMBLY__ +# define stringify_in_c(...) __VA_ARGS__ +# define ASM_CONST(x) x +#else +/* This version of stringify will deal with commas... */ +# define __stringify_in_c(...) #__VA_ARGS__ +# define stringify_in_c(...) 
__stringify_in_c(__VA_ARGS__) " " +# define __ASM_CONST(x) x##UL +# define ASM_CONST(x) __ASM_CONST(x) +#endif + +#endif /* _ASM_MICROBLAZE_ASM_COMPAT_H */ diff --git a/arch/microblaze/include/asm/asm-offsets.h b/arch/microblaze/include/asm/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/arch/microblaze/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h new file mode 100644 index 000000000..41e9aff23 --- /dev/null +++ b/arch/microblaze/include/asm/atomic.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_ATOMIC_H +#define _ASM_MICROBLAZE_ATOMIC_H + +#include <asm/cmpxchg.h> +#include <asm-generic/atomic.h> +#include <asm-generic/atomic64.h> + +/* + * Atomically test *v and decrement if it is greater than 0. + * The function returns the old value of *v minus 1. + */ +static inline int atomic_dec_if_positive(atomic_t *v) +{ + unsigned long flags; + int res; + + local_irq_save(flags); + res = v->counter - 1; + if (res >= 0) + v->counter = res; + local_irq_restore(flags); + + return res; +} +#define atomic_dec_if_positive atomic_dec_if_positive + +#endif /* _ASM_MICROBLAZE_ATOMIC_H */ diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h new file mode 100644 index 000000000..4efe96a03 --- /dev/null +++ b/arch/microblaze/include/asm/cache.h @@ -0,0 +1,24 @@ +/* + * Cache operations + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#ifndef _ASM_MICROBLAZE_CACHE_H +#define _ASM_MICROBLAZE_CACHE_H + +#include <asm/registers.h> + +#define L1_CACHE_SHIFT 5 +/* word-granular cache in microblaze */ +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif /* _ASM_MICROBLAZE_CACHE_H */ diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h new file mode 100644 index 000000000..b091de77b --- /dev/null +++ b/arch/microblaze/include/asm/cacheflush.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * based on v850 version which was + * Copyright (C) 2001,02,03 NEC Electronics Corporation + * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + */ + +#ifndef _ASM_MICROBLAZE_CACHEFLUSH_H +#define _ASM_MICROBLAZE_CACHEFLUSH_H + +/* Somebody depends on this; sigh... */ +#include <linux/mm.h> +#include <linux/io.h> + +/* Look at Documentation/core-api/cachetlb.rst */ + +/* + * Cache handling functions. + * Microblaze has a write-through data cache, meaning that the data cache + * never needs to be flushed. The only flushing operations that are + * implemented are to invalidate the instruction cache. These are called + * after loading a user application into memory, we must invalidate the + * instruction cache to make sure we don't fetch old, bad code. 
+ */ + +/* struct cache, d=dcache, i=icache, fl = flush, iv = invalidate, + * suffix r = range */ +struct scache { + /* icache */ + void (*ie)(void); /* enable */ + void (*id)(void); /* disable */ + void (*ifl)(void); /* flush */ + void (*iflr)(unsigned long a, unsigned long b); + void (*iin)(void); /* invalidate */ + void (*iinr)(unsigned long a, unsigned long b); + /* dcache */ + void (*de)(void); /* enable */ + void (*dd)(void); /* disable */ + void (*dfl)(void); /* flush */ + void (*dflr)(unsigned long a, unsigned long b); + void (*din)(void); /* invalidate */ + void (*dinr)(unsigned long a, unsigned long b); +}; + +/* microblaze cache */ +extern struct scache *mbc; + +void microblaze_cache_init(void); + +#define enable_icache() mbc->ie(); +#define disable_icache() mbc->id(); +#define flush_icache() mbc->ifl(); +#define flush_icache_range(start, end) mbc->iflr(start, end); +#define invalidate_icache() mbc->iin(); +#define invalidate_icache_range(start, end) mbc->iinr(start, end); + +#define flush_icache_user_range(vma, pg, adr, len) flush_icache(); +#define flush_icache_page(vma, pg) do { } while (0) + +#define enable_dcache() mbc->de(); +#define disable_dcache() mbc->dd(); +/* FIXME for LL-temac driver */ +#define invalidate_dcache() mbc->din(); +#define invalidate_dcache_range(start, end) mbc->dinr(start, end); +#define flush_dcache() mbc->dfl(); +#define flush_dcache_range(start, end) mbc->dflr(start, end); + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +/* MS: We have to implement it because of rootfs-jffs2 issue on WB */ +#define flush_dcache_page(page) \ +do { \ + unsigned long addr = (unsigned long) page_address(page); /* virtual */ \ + addr = (u32)virt_to_phys((void *)addr); \ + flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \ +} while (0); + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) +#define flush_cache_mm(mm) do { } while (0) + +#define flush_cache_page(vma, vmaddr, pfn) \ + flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE); + +/* MS: kgdb code use this macro, wrong len with FLASH */ +#if 0 +#define flush_cache_range(vma, start, len) { \ + flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \ + flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \ +} +#endif + +#define flush_cache_range(vma, start, len) do { } while (0) + +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + u32 addr = virt_to_phys(dst); + memcpy(dst, src, len); + if (vma->vm_flags & VM_EXEC) { + invalidate_icache_range(addr, addr + PAGE_SIZE); + flush_dcache_range(addr, addr + PAGE_SIZE); + } +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + memcpy(dst, src, len); +} + +#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h new file mode 100644 index 000000000..adeecebbb --- /dev/null +++ b/arch/microblaze/include/asm/checksum.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2008 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006 Atmark Techno, Inc. 
+ * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_CHECKSUM_H +#define _ASM_MICROBLAZE_CHECKSUM_H + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +#define csum_tcpudp_nofold csum_tcpudp_nofold +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + __asm__("add %0, %0, %1\n\t" + "addc %0, %0, %2\n\t" + "addc %0, %0, %3\n\t" + "addc %0, %0, r0\n\t" + : "+&d" (sum) + : "d" (saddr), "d" (daddr), +#ifdef __MICROBLAZEEL__ + "d" ((len + proto) << 8) +#else + "d" (len + proto) +#endif +); + return sum; +} + +#include <asm-generic/checksum.h> + +#endif /* _ASM_MICROBLAZE_CHECKSUM_H */ diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h new file mode 100644 index 000000000..596300c74 --- /dev/null +++ b/arch/microblaze/include/asm/cmpxchg.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_CMPXCHG_H +#define _ASM_MICROBLAZE_CMPXCHG_H + +#include <linux/irqflags.h> + +void __bad_xchg(volatile void *ptr, int size); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) +{ + unsigned long ret; + unsigned long flags; + + switch (size) { + case 1: + local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + local_irq_restore(flags); + break; + + case 4: + local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + local_irq_restore(flags); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + + return ret; +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg.h> +#include <asm-generic/cmpxchg-local.h> + +#endif /* _ASM_MICROBLAZE_CMPXCHG_H */ diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h new file mode 100644 index 000000000..8f4996730 --- /dev/null +++ b/arch/microblaze/include/asm/cpuinfo.h @@ -0,0 +1,108 @@ +/* + * Generic support for queying CPU info + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <jwilliams@itee.uq.edu.au> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. 
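The csum_tcpudp_nofold() routine above folds the TCP/UDP pseudo-header into a running checksum with an add/addc carry chain, absorbing the final carry with "addc r0"; on little-endian builds (__MICROBLAZEEL__) it pre-shifts len + proto by 8 so the bytes land in the lanes the one's-complement sum expects. A portable C model of that carry-chain arithmetic (an illustration, not a drop-in replacement for the asm):

```c
#include <stdint.h>

/* Model of the add/addc chain: accumulate in 64 bits and feed each
 * step's carry into the next add, as the MicroBlaze addc does. */
static uint32_t csum_pseudo_model(uint32_t saddr, uint32_t daddr,
				  uint32_t lenproto, uint32_t sum)
{
	uint64_t acc = (uint64_t)sum + saddr;		/* add	*/
	acc = (uint32_t)acc + (acc >> 32) + daddr;	/* addc */
	acc = (uint32_t)acc + (acc >> 32) + lenproto;	/* addc */
	acc = (uint32_t)acc + (acc >> 32);		/* addc r0 */
	return (uint32_t)acc;
}
```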
+ */ + +#ifndef _ASM_MICROBLAZE_CPUINFO_H +#define _ASM_MICROBLAZE_CPUINFO_H + +#include <linux/of.h> + +/* CPU Version and FPGA Family code conversion table type */ +struct cpu_ver_key { + const char *s; + const unsigned k; +}; + +extern const struct cpu_ver_key cpu_ver_lookup[]; + +struct family_string_key { + const char *s; + const unsigned k; +}; + +extern const struct family_string_key family_string_lookup[]; + +struct cpuinfo { + /* Core CPU configuration */ + u32 use_instr; + u32 use_mult; + u32 use_fpu; + u32 use_exc; + u32 ver_code; + u32 mmu; + u32 mmu_privins; + u32 endian; + + /* CPU caches */ + u32 use_icache; + u32 icache_tagbits; + u32 icache_write; + u32 icache_line_length; + u32 icache_size; + unsigned long icache_base; + unsigned long icache_high; + + u32 use_dcache; + u32 dcache_tagbits; + u32 dcache_write; + u32 dcache_line_length; + u32 dcache_size; + u32 dcache_wb; + unsigned long dcache_base; + unsigned long dcache_high; + + /* Bus connections */ + u32 use_dopb; + u32 use_iopb; + u32 use_dlmb; + u32 use_ilmb; + u32 num_fsl; + + /* CPU interrupt line info */ + u32 irq_edge; + u32 irq_positive; + + u32 area_optimised; + + /* HW debug support */ + u32 hw_debug; + u32 num_pc_brk; + u32 num_rd_brk; + u32 num_wr_brk; + u32 cpu_clock_freq; /* store real freq of cpu */ + + /* FPGA family */ + u32 fpga_family_code; + + /* User define */ + u32 pvr_user1; + u32 pvr_user2; +}; + +extern struct cpuinfo cpuinfo; + +/* fwd declarations of the various CPUinfo populators */ +void setup_cpuinfo(void); +void setup_cpuinfo_clk(void); + +void set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu); +void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu); + +static inline unsigned int fcpu(struct device_node *cpu, char *n) +{ + u32 val = 0; + + of_property_read_u32(cpu, n, &val); + + return val; +} + +#endif /* _ASM_MICROBLAZE_CPUINFO_H */ diff --git a/arch/microblaze/include/asm/cputable.h b/arch/microblaze/include/asm/cputable.h new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/arch/microblaze/include/asm/cputable.h @@ -0,0 +1 @@ + diff --git a/arch/microblaze/include/asm/current.h b/arch/microblaze/include/asm/current.h new file mode 100644 index 000000000..29303ed82 --- /dev/null +++ b/arch/microblaze/include/asm/current.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_CURRENT_H +#define _ASM_MICROBLAZE_CURRENT_H + +/* + * Register used to hold the current task pointer while in the kernel. + * Any `call clobbered' register without a special meaning should be OK, + * but check asm/microblaze/kernel/entry.S to be sure. + */ +#define CURRENT_TASK r31 +# ifndef __ASSEMBLY__ +/* + * Dedicate r31 to keeping the current task pointer + */ +register struct task_struct *current asm("r31"); + +# define get_current() current +# endif /* __ASSEMBLY__ */ + +#endif /* _ASM_MICROBLAZE_CURRENT_H */ diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h new file mode 100644 index 000000000..ea2a9cd9b --- /dev/null +++ b/arch/microblaze/include/asm/delay.h @@ -0,0 +1,90 @@ +/* + * include/asm-microblaze/delay.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Michal Simek + * Copyright (C) 2007 John Williams + * Copyright (C) 2006 Atmark Techno, Inc. + */ + +#ifndef _ASM_MICROBLAZE_DELAY_H +#define _ASM_MICROBLAZE_DELAY_H + +#include <linux/param.h> + +static inline void __delay(unsigned long loops) +{ + asm volatile ("# __delay \n\t" \ + "1: addi %0, %0, -1\t\n" \ + "bneid %0, 1b \t\n" \ + "nop \t\n" + : "=r" (loops) + : "0" (loops)); +} + +/* + * Note that 19 * 226 == 4294 ==~ 2^32 / 10^6, so + * loops = (4294 * usecs * loops_per_jiffy * HZ) / 2^32. + * + * The mul instruction gives us loops = (a * b) / 2^32. + * We choose a = usecs * 19 * HZ and b = loops_per_jiffy * 226 + * because this lets us support a wide range of HZ and + * loops_per_jiffy values without either a or b overflowing 2^32. + * Thus we need usecs * HZ <= (2^32 - 1) / 19 = 226050910 and + * loops_per_jiffy <= (2^32 - 1) / 226 = 19004280 + * (which corresponds to ~3800 bogomips at HZ = 100). + * -- paulus + */ +#define __MAX_UDELAY (226050910UL/HZ) /* maximum udelay argument */ +#define __MAX_NDELAY (4294967295UL/HZ) /* maximum ndelay argument */ + +extern unsigned long loops_per_jiffy; + +static inline void __udelay(unsigned int x) +{ + + unsigned long long tmp = + (unsigned long long)x * (unsigned long long)loops_per_jiffy \ + * 226LL; + unsigned loops = tmp >> 32; + +/* + __asm__("mulxuu %0,%1,%2" : "=r" (loops) : + "r" (x), "r" (loops_per_jiffy * 226)); +*/ + __delay(loops); +} + +extern void __bad_udelay(void); /* deliberately undefined */ +extern void __bad_ndelay(void); /* deliberately undefined */ + +#define udelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / __MAX_UDELAY >= 1) \ + __bad_udelay(); \ + else \ + __udelay((n) * (19 * HZ)); \ + } else { \ + __udelay((n) * (19 * HZ)); \ + } \ + }) + +#define ndelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / __MAX_NDELAY >= 1) \ + __bad_ndelay(); \ + else \ + __udelay((n) * HZ); \ + } else { \ + __udelay((n) * HZ); \ + } \ + }) + +#define muldiv(a, b, c) (((a)*(b))/(c)) + +#endif /* _ASM_MICROBLAZE_DELAY_H */ diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h new file mode 100644 index 000000000..0d73d0c6d --- /dev/null +++ b/arch/microblaze/include/asm/dma.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_DMA_H +#define _ASM_MICROBLAZE_DMA_H + +#ifndef CONFIG_MMU +/* we don't have dma address limit. define it as zero to be + * unlimited. */ +#define MAX_DMA_ADDRESS (0) +#else +/* Virtual address corresponding to last available physical memory address. */ +#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) +#endif + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_MICROBLAZE_DMA_H */ diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h new file mode 100644 index 000000000..659024449 --- /dev/null +++ b/arch/microblaze/include/asm/elf.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
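Backing up to the udelay() arithmetic in delay.h above: the ideal count is loops = usecs * loops_per_jiffy * HZ / 10^6, but the cheap primitive is a 32x32 multiply yielding (a * b) >> 32, so the header picks a = usecs * 19 * HZ and b = loops_per_jiffy * 226 and leans on 19 * 226 = 4294, which is approximately 2^32 / 10^6. A standalone check of that identity (the HZ and loops_per_jiffy values here are made up purely to exercise the formula):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed example values: HZ = 100, lpj for ~250 BogoMIPS. */
	const uint64_t HZ = 100, lpj = 1250000, usecs = 50;

	uint64_t exact  = usecs * lpj * HZ / 1000000;	/* ideal count */
	uint64_t a      = usecs * 19 * HZ;		/* as in udelay() */
	uint64_t b      = lpj * 226;			/* as in __udelay() */
	uint64_t approx = (a * b) >> 32;		/* one 32x32->64 mul */

	/* 19 * 226 = 4294 ~= 2^32 / 10^6, so the two agree closely
	 * (6248 vs. 6250 for these inputs). */
	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}
```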
See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_MICROBLAZE_ELF_H +#define _ASM_MICROBLAZE_ELF_H + +#include <uapi/asm/elf.h> + +#ifndef __uClinux__ +#ifndef ELF_GREG_T +#endif +#ifndef ELF_NGREG +#endif +#ifndef ELF_GREGSET_T +#endif +#ifndef ELF_FPREGSET_T +#endif +#ifdef __MICROBLAZEEL__ +#else +#endif +#define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK))) +#endif /* __uClinux__ */ +#endif /* _ASM_MICROBLAZE_ELF_H */ diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h new file mode 100644 index 000000000..596e485ae --- /dev/null +++ b/arch/microblaze/include/asm/entry.h @@ -0,0 +1,37 @@ +/* + * Definitions used by low-level trap handlers + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#ifndef _ASM_MICROBLAZE_ENTRY_H +#define _ASM_MICROBLAZE_ENTRY_H + +#include <asm/percpu.h> +#include <asm/ptrace.h> +#include <linux/linkage.h> + +/* + * These are per-cpu variables required in entry.S, among other + * places + */ + +#define PER_CPU(var) var + +# ifndef __ASSEMBLY__ +DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ +DECLARE_PER_CPU(unsigned int, KM); /* Kernel/user mode */ +DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */ +DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ +DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ + +extern asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall); +# endif /* __ASSEMBLY__ */ + +#endif /* _ASM_MICROBLAZE_ENTRY_H */ diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h new file mode 100644 index 000000000..e6a8ddea1 --- /dev/null +++ b/arch/microblaze/include/asm/exceptions.h @@ -0,0 +1,77 @@ +/* + * Preliminary support for HW exception handing for Microblaze + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_EXCEPTIONS_H +#define _ASM_MICROBLAZE_EXCEPTIONS_H + +#ifdef __KERNEL__ + +#ifndef CONFIG_MMU +#define EX_HANDLER_STACK_SIZ (4*19) +#endif + +#ifndef __ASSEMBLY__ + +/* Macros to enable and disable HW exceptions in the MSR */ +/* Define MSR enable bit for HW exceptions */ +#define HWEX_MSR_BIT (1 << 8) + +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR +#define __enable_hw_exceptions() \ + __asm__ __volatile__ (" msrset r0, %0; \ + nop;" \ + : \ + : "i" (HWEX_MSR_BIT) \ + : "memory") + +#define __disable_hw_exceptions() \ + __asm__ __volatile__ (" msrclr r0, %0; \ + nop;" \ + : \ + : "i" (HWEX_MSR_BIT) \ + : "memory") +#else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ +#define __enable_hw_exceptions() \ + __asm__ __volatile__ (" \ + mfs r12, rmsr; \ + nop; \ + ori r12, r12, %0; \ + mts rmsr, r12; \ + nop;" \ + : \ + : "i" (HWEX_MSR_BIT) \ + : "memory", "r12") + +#define __disable_hw_exceptions() \ + __asm__ __volatile__ (" \ + mfs r12, rmsr; \ + nop; \ + andi r12, r12, ~%0; \ + mts rmsr, r12; \ + nop;" \ + : \ + : "i" (HWEX_MSR_BIT) \ + : "memory", "r12") +#endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ + +asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, + int fsr, int addr); + +asmlinkage void sw_exception(struct pt_regs *regs); +void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig); + +void die(const char *str, struct pt_regs *fp, long err); +void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); + +#endif /*__ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_EXCEPTIONS_H */ diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h new file mode 100644 index 000000000..06c0e2b18 --- /dev/null +++ b/arch/microblaze/include/asm/fixmap.h @@ -0,0 +1,69 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Copyright 2008 Freescale Semiconductor Inc. + * Port to powerpc added by Kumar Gala + * + * Copyright 2011 Michal Simek <monstr@monstr.eu> + * Copyright 2011 PetaLogix Qld Pty Ltd + * Port to Microblaze + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#ifndef __ASSEMBLY__ +#include <linux/kernel.h> +#include <asm/page.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. 
+ */ +enum fixed_addresses { + FIX_HOLE, +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1, +#endif + __end_of_fixed_addresses +}; + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) + +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_CI + +#include <asm-generic/fixmap.h> + +#endif /* !__ASSEMBLY__ */ +#endif diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h new file mode 100644 index 000000000..3d2747d4c --- /dev/null +++ b/arch/microblaze/include/asm/flat.h @@ -0,0 +1,89 @@ +/* + * uClinux flat-format executables + * + * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#ifndef _ASM_MICROBLAZE_FLAT_H +#define _ASM_MICROBLAZE_FLAT_H + +#include <asm/unaligned.h> + +#define flat_argvp_envp_on_stack() 0 +#define flat_old_ram_flag(flags) (flags) +#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) +#define flat_set_persistent(relval, p) 0 + +/* + * Microblaze works a little differently from other arches, because + * of the MICROBLAZE_64 reloc type. Here, a 32 bit address is split + * over two instructions, an 'imm' instruction which provides the top + * 16 bits, then the instruction "proper" which provides the low 16 + * bits. + */ + +/* + * Crack open a symbol reference and extract the address to be + * relocated. rp is a potentially unaligned pointer to the + * reference + */ + +static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags, + u32 *addr, u32 *persistent) +{ + u32 *p = (__force u32 *)rp; + + /* Is it a split 64/32 reference? */ + if (relval & 0x80000000) { + /* Grab the two halves of the reference */ + u32 val_hi, val_lo; + + val_hi = get_unaligned(p); + val_lo = get_unaligned(p+1); + + /* Crack the address out */ + *addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff); + } else { + /* Get the address straight out */ + *addr = get_unaligned(p); + } + + return 0; +} + +/* + * Insert an address into the symbol reference at rp. rp is potentially + * unaligned. + */ + +static inline int +flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 relval) +{ + u32 *p = (__force u32 *)rp; + /* Is this a split 64/32 reloc? 
*/ + if (relval & 0x80000000) { + /* Get the two "halves" */ + unsigned long val_hi = get_unaligned(p); + unsigned long val_lo = get_unaligned(p + 1); + + /* insert the address */ + val_hi = (val_hi & 0xffff0000) | addr >> 16; + val_lo = (val_lo & 0xffff0000) | (addr & 0xffff); + + /* store the two halves back into memory */ + put_unaligned(val_hi, p); + put_unaligned(val_lo, p+1); + } else { + /* Put it straight in, no messing around */ + put_unaligned(addr, p); + } + return 0; +} + +#define flat_get_relocate_addr(rel) (rel & 0x7fffffff) + +#endif /* _ASM_MICROBLAZE_FLAT_H */ diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h new file mode 100644 index 000000000..5db7f4489 --- /dev/null +++ b/arch/microblaze/include/asm/ftrace.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_FTRACE +#define _ASM_MICROBLAZE_FTRACE + +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_ADDR ((unsigned long)(_mcount)) +#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ +extern void _mcount(void); +extern void ftrace_call_graph(void); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +/* relocation of the mcount call site is the same as the address */ +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { +}; +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#endif /* CONFIG_FUNCTION_TRACER */ +#endif /* _ASM_MICROBLAZE_FTRACE */ diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h new file mode 100644 index 000000000..2572077b0 --- /dev/null +++ b/arch/microblaze/include/asm/futex.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_FUTEX_H +#define _ASM_MICROBLAZE_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +({ \ + __asm__ __volatile__ ( \ + "1: lwx %0, %2, r0; " \ + insn \ + "2: swx %1, %2, r0; \ + addic %1, r0, 0; \ + bnei %1, 1b; \ + 3: \ + .section .fixup,\"ax\"; \ + 4: brid 3b; \ + addik %1, r0, %3; \ + .previous; \ + .section __ex_table,\"a\"; \ + .word 1b,4b,2b,4b; \ + .previous;" \ + : "=&r" (oldval), "=&r" (ret) \ + : "r" (uaddr), "i" (-EFAULT), "r" (oparg) \ + ); \ +}) + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("or %1,%4,%4;", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %1,%0,%4;", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ("1: lwx %1, %3, r0; \ + cmp %2, %1, %4; \ + bnei %2, 3f; \ + 2: swx %5, %3, r0; \ + addic %2, r0, 0; \ + bnei %2, 1b; \ + 3: \ + .section .fixup,\"ax\"; \ + 4: brid 3b; \ + addik %0, r0, %6; \ + .previous; \ + .section __ex_table,\"a\"; \ +
.word 1b,4b,2b,4b; \ + .previous;" \ + : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ + : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); + + *uval = prev; + return ret; +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/arch/microblaze/include/asm/hash.h b/arch/microblaze/include/asm/hash.h new file mode 100644 index 000000000..ef4741a40 --- /dev/null +++ b/arch/microblaze/include/asm/hash.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_HASH_H +#define _ASM_HASH_H + +/* + * Fortunately, most people who want to run Linux on Microblaze enable + * both multiplier and barrel shifter, but omitting them is technically + * a supported configuration. + * + * With just a barrel shifter, we can implement an efficient constant + * multiply using shifts and adds. GCC can find a 9-step solution, but + * this 6-step solution was found by Yevgen Voronenko's implementation + * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html. + * + * That software is really not designed for a single multiplier this large, + * but if you run it enough times with different seeds, it'll find several + * 6-shift, 6-add sequences for computing x * 0x61C88647. They are all + * c = (x << 19) + x; + * a = (x << 9) + c; + * b = (x << 23) + a; + * return (a<<11) + (b<<6) + (c<<3) - b; + * with variations on the order of the final add. + * + * Without even a shifter, it's hopeless; any hash function will suck. + */ + +#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0 + +#define HAVE_ARCH__HASH_32 1 + +/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */ +static inline u32 __attribute_const__ __hash_32(u32 a) +{ +#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL + unsigned int b, c; + + /* Phase 1: Compute three intermediate values */ + b = a << 23; + c = (a << 19) + a; + a = (a << 9) + c; + b += a; + + /* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */ + a <<= 5; + a += b; /* (a << 5) + b */ + a <<= 3; + a += c; /* (a << 8) + (b << 3) + c */ + a <<= 3; + return a - b; /* (a << 11) + (b << 6) + (c << 3) - b */ +#else + /* + * "This is really going to hurt." + * + * Without a barrel shifter, left shifts are implemented as + * repeated additions, and the best we can do is an optimal + * addition-subtraction chain. This one is not known to be + * optimal, but at 37 steps, it's decent for a 31-bit multiplier. + * + * Question: given its size (37*4 = 148 bytes per instance), + * and slowness, is this worth having inline? + */ + unsigned int b, c, d; + + b = a << 4; /* 4 */ + c = b << 1; /* 1 5 */ + b += a; /* 1 6 */ + c += b; /* 1 7 */ + c <<= 3; /* 3 10 */ + c -= a; /* 1 11 */ + d = c << 7; /* 7 18 */ + d += b; /* 1 19 */ + d <<= 8; /* 8 27 */ + d += a; /* 1 28 */ + d <<= 1; /* 1 29 */ + d += b; /* 1 30 */ + d <<= 6; /* 6 36 */ + return d + c; /* 1 37 total instructions */ +#endif +} + +#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */ +#endif /* _ASM_HASH_H */ diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h new file mode 100644 index 000000000..332c78e15 --- /dev/null +++ b/arch/microblaze/include/asm/highmem.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * highmem.h: virtual kernel memory mappings for high memory + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terabyte physical memory.
With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. + * + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> + */ +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <asm/fixmap.h> + +extern pte_t *kmap_pte; +extern pgprot_t kmap_prot; +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily; subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +/* + * We use one full pte table with 4K pages. With 16K/64K/256K pages the pte + * table covers enough memory (32MB/512MB/2GB respectively), so that both FIXMAP + * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP + * in case of 16K/64K/256K page sizes. + */ + +#define PKMAP_ORDER PTE_SHIFT +#define LAST_PKMAP (1 << PKMAP_ORDER) + +#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ + & PMD_MASK) + +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); +extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); +extern void __kunmap_atomic(void *kvaddr); + +static inline void *kmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_high(page); +} + +static inline void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +static inline void *kmap_atomic(struct page *page) +{ + return kmap_atomic_prot(page, kmap_prot); +} + +#define flush_cache_kmaps() { flush_icache(); flush_dcache(); } + +#endif /* __KERNEL__ */ + +#endif /* _ASM_HIGHMEM_H */ diff --git a/arch/microblaze/include/asm/hw_irq.h b/arch/microblaze/include/asm/hw_irq.h new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/arch/microblaze/include/asm/hw_irq.h @@ -0,0 +1 @@ + diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h new file mode 100644 index 000000000..c79681394 --- /dev/null +++ b/arch/microblaze/include/asm/io.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details.
+ */ + +#ifndef _ASM_MICROBLAZE_IO_H +#define _ASM_MICROBLAZE_IO_H + +#include <asm/byteorder.h> +#include <asm/page.h> +#include <linux/types.h> +#include <linux/mm.h> /* Get struct page {...} */ + +#ifndef CONFIG_PCI +#define _IO_BASE 0 +#define _ISA_MEM_BASE 0 +#else +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#define pci_iounmap pci_iounmap + +extern unsigned long isa_io_base; +extern resource_size_t isa_mem_base; +#endif + +#define PCI_IOBASE ((void __iomem *)_IO_BASE) +#define IO_SPACE_LIMIT (0xFFFFFFFF) + +#ifdef CONFIG_MMU +#define page_to_bus(page) (page_to_phys(page)) + +extern void iounmap(volatile void __iomem *addr); + +extern void __iomem *ioremap(phys_addr_t address, unsigned long size); +#define ioremap_nocache(addr, size) ioremap((addr), (size)) +#define ioremap_fullcache(addr, size) ioremap((addr), (size)) +#define ioremap_wc(addr, size) ioremap((addr), (size)) +#define ioremap_wt(addr, size) ioremap((addr), (size)) + +#endif /* CONFIG_MMU */ + +/* Big Endian */ +#define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a)) +#define out_be16(a, v) __raw_writew((v), (a)) + +#define in_be32(a) __raw_readl((const void __iomem __force *)(a)) +#define in_be16(a) __raw_readw(a) + +#define writel_be(v, a) out_be32((__force unsigned *)a, v) +#define readl_be(a) in_be32((__force unsigned *)a) + +/* Little endian */ +#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a)) +#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a)) + +#define in_le32(a) __le32_to_cpu(__raw_readl(a)) +#define in_le16(a) __le16_to_cpu(__raw_readw(a)) + +/* Byte ops */ +#define out_8(a, v) __raw_writeb((v), (a)) +#define in_8(a) __raw_readb(a) + +#include <asm-generic/io.h> + +#endif /* _ASM_MICROBLAZE_IO_H */ diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h new file mode 100644 index 000000000..d785defee --- /dev/null +++ b/arch/microblaze/include/asm/irq.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_IRQ_H +#define _ASM_MICROBLAZE_IRQ_H + +#define NR_IRQS (32 + 1) +#include <asm-generic/irq.h> + +struct pt_regs; +extern void do_IRQ(struct pt_regs *regs); + +/* should be defined in each interrupt controller driver */ +extern unsigned int xintc_get_irq(void); + +#endif /* _ASM_MICROBLAZE_IRQ_H */ diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h new file mode 100644 index 000000000..c9a626283 --- /dev/null +++ b/arch/microblaze/include/asm/irqflags.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_IRQFLAGS_H +#define _ASM_MICROBLAZE_IRQFLAGS_H + +#include <linux/types.h> +#include <asm/registers.h> + +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR + +static inline notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile(" msrclr %0, %1 \n" + " nop \n" + : "=r"(flags) + : "i"(MSR_IE) + : "memory"); + return flags; +} + +static inline notrace void arch_local_irq_disable(void) +{ + /* this uses r0 without declaring it - is that correct? 
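+ * (It is: r0 is hardwired to zero on MicroBlaze and cannot be + * clobbered, so msrclr/msrset may name it as a destination simply to + * discard the old MSR value without tying up a real register.)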
*/ + asm volatile(" msrclr r0, %0 \n" + " nop \n" + : + : "i"(MSR_IE) + : "memory"); +} + +static inline notrace void arch_local_irq_enable(void) +{ + /* this uses r0 without declaring it - is that correct? */ + asm volatile(" msrset r0, %0 \n" + " nop \n" + : + : "i"(MSR_IE) + : "memory"); +} + +#else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ + +static inline notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags, tmp; + asm volatile (" mfs %0, rmsr \n" + " nop \n" + " andi %1, %0, %2 \n" + " mts rmsr, %1 \n" + " nop \n" + : "=r"(flags), "=r"(tmp) + : "i"(~MSR_IE) + : "memory"); + return flags; +} + +static inline notrace void arch_local_irq_disable(void) +{ + unsigned long tmp; + asm volatile(" mfs %0, rmsr \n" + " nop \n" + " andi %0, %0, %1 \n" + " mts rmsr, %0 \n" + " nop \n" + : "=r"(tmp) + : "i"(~MSR_IE) + : "memory"); +} + +static inline notrace void arch_local_irq_enable(void) +{ + unsigned long tmp; + asm volatile(" mfs %0, rmsr \n" + " nop \n" + " ori %0, %0, %1 \n" + " mts rmsr, %0 \n" + " nop \n" + : "=r"(tmp) + : "i"(MSR_IE) + : "memory"); +} + +#endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ + +static inline notrace unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile(" mfs %0, rmsr \n" + " nop \n" + : "=r"(flags) + : + : "memory"); + return flags; +} + +static inline notrace void arch_local_irq_restore(unsigned long flags) +{ + asm volatile(" mts rmsr, %0 \n" + " nop \n" + : + : "r"(flags) + : "memory"); +} + +static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & MSR_IE) == 0; +} + +static inline notrace bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */ diff --git a/arch/microblaze/include/asm/kgdb.h b/arch/microblaze/include/asm/kgdb.h new file mode 100644 index 000000000..8dc5ebb07 --- /dev/null +++ b/arch/microblaze/include/asm/kgdb.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __KERNEL__ +#ifndef __MICROBLAZE_KGDB_H__ +#define __MICROBLAZE_KGDB_H__ + +#ifndef __ASSEMBLY__ + +#define CACHE_FLUSH_IS_SAFE 1 +#define BUFMAX 2048 + +/* + * 32 32-bit general purpose registers (r0-r31) + * 6 32-bit special registers (pc, msr, ear, esr, fsr, btr) + * 12 32-bit PVR + * 7 32-bit MMU Regs (redr, rpid, rzpr, rtlbx, rtlbsx, rtlblo, rtlbhi) + * ------ + * 57 registers + */ +#define NUMREGBYTES (57 * 4) + +#define BREAK_INSTR_SIZE 4 +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__("brki r16, 0x18;"); +} + +struct pt_regs; +asmlinkage void microblaze_kgdb_break(struct pt_regs *regs); + +#endif /* __ASSEMBLY__ */ +#endif /* __MICROBLAZE_KGDB_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h new file mode 100644 index 000000000..1f9edddf7 --- /dev/null +++ b/arch/microblaze/include/asm/mmu.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_MMU_H +#define _ASM_MICROBLAZE_MMU_H + +# ifndef CONFIG_MMU +# include <asm-generic/mmu.h> +# else /* CONFIG_MMU */ +# ifdef __KERNEL__ +# ifndef __ASSEMBLY__ + +/* Default "unsigned long" context */ +typedef unsigned long mm_context_t; + +/* Hardware Page Table Entry */ +typedef struct _PTE { + unsigned long v:1; /* Entry is valid */ + unsigned long vsid:24; /* Virtual segment identifier */ + unsigned long h:1; /* Hash algorithm indicator */ + unsigned long api:6; /* Abbreviated page index */ + unsigned long rpn:20; /* Real (physical) page number */ + unsigned long :3; /* Unused */ + unsigned long r:1; /* Referenced */ + unsigned long c:1; /* Changed */ + unsigned long w:1; /* Write-thru cache mode */ + unsigned long i:1; /* Cache inhibited */ + unsigned long m:1; /* Memory coherence */ + unsigned long g:1; /* Guarded */ + unsigned long :1; /* Unused */ + unsigned long pp:2; /* Page protection */ +} PTE; + +/* Values for PP (assumes Ks=0, Kp=1) */ +# define PP_RWXX 0 /* Supervisor read/write, User none */ +# define PP_RWRX 1 /* Supervisor read/write, User read */ +# define PP_RWRW 2 /* Supervisor read/write, User read/write */ +# define PP_RXRX 3 /* Supervisor read, User read */ + +/* Segment Register */ +typedef struct _SEGREG { + unsigned long t:1; /* Normal or I/O type */ + unsigned long ks:1; /* Supervisor 'key' (normally 0) */ + unsigned long kp:1; /* User 'key' (normally 1) */ + unsigned long n:1; /* No-execute */ + unsigned long :4; /* Unused */ + unsigned long vsid:24; /* Virtual Segment Identifier */ +} SEGREG; + +extern void _tlbie(unsigned long va); /* invalidate a TLB entry */ +extern void _tlbia(void); /* invalidate all TLB entries */ + +/* + * tlb_skip stores the actual number of TLBs skipped from TLB0 - every direct TLB + * mapping has to increase tlb_skip. + */ +extern u32 tlb_skip; +# endif /* __ASSEMBLY__ */ + +/* + * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The + * instruction and data sides share a unified, 64-entry, semi-associative + * TLB which is maintained totally under software control. In addition, the + * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative + * TLB which serves as a first level to the shared TLB. These two TLBs are + * known as the UTLB and ITLB, respectively. + */ + +# define MICROBLAZE_TLB_SIZE 64 + +/* For cases when you want to skip some TLB entries */ +# define MICROBLAZE_TLB_SKIP 0 + +/* Use the last TLB for temporary access to LMB */ +# define MICROBLAZE_LMB_TLB_ID 63 + +/* + * TLB entries are defined by a "high" tag portion and a "low" data + * portion. The data portion is 32-bits. + * + * TLB entries are managed entirely under software control by reading, + * writing, and searching using the MTS and MFS instructions.
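+ * + * As a hedged illustration (not code from this file), software might + * load one entry roughly like: + * + *     mts rtlbx,  r5   # select the TLB index to write + *     mts rtlbhi, r6   # tag word: EPN | TLB_PAGESZ(x) | TLB_VALID + *     mts rtlblo, r7   # data word: RPN | TLB_EX | TLB_WR | attributes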
+ */ + +# define TLB_LO 1 +# define TLB_HI 0 +# define TLB_DATA TLB_LO +# define TLB_TAG TLB_HI + +/* Tag portion */ +# define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ +# define TLB_PAGESZ_MASK 0x00000380 +# define TLB_PAGESZ(x) (((x) & 0x7) << 7) +# define PAGESZ_1K 0 +# define PAGESZ_4K 1 +# define PAGESZ_16K 2 +# define PAGESZ_64K 3 +# define PAGESZ_256K 4 +# define PAGESZ_1M 5 +# define PAGESZ_4M 6 +# define PAGESZ_16M 7 +# define TLB_VALID 0x00000040 /* Entry is valid */ + +/* Data portion */ +# define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ +# define TLB_PERM_MASK 0x00000300 +# define TLB_EX 0x00000200 /* Instruction execution allowed */ +# define TLB_WR 0x00000100 /* Writes permitted */ +# define TLB_ZSEL_MASK 0x000000F0 +# define TLB_ZSEL(x) (((x) & 0xF) << 4) +# define TLB_ATTR_MASK 0x0000000F +# define TLB_W 0x00000008 /* Caching is write-through */ +# define TLB_I 0x00000004 /* Caching is inhibited */ +# define TLB_M 0x00000002 /* Memory is coherent */ +# define TLB_G 0x00000001 /* Memory is guarded from prefetch */ + +# endif /* __KERNEL__ */ +# endif /* CONFIG_MMU */ +#endif /* _ASM_MICROBLAZE_MMU_H */ diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h new file mode 100644 index 000000000..f74f9da07 --- /dev/null +++ b/arch/microblaze/include/asm/mmu_context.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef CONFIG_MMU +# include <asm/mmu_context_mm.h> +#else +# include <asm-generic/mmu_context.h> +#endif diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h new file mode 100644 index 000000000..97559fe0b --- /dev/null +++ b/arch/microblaze/include/asm/mmu_context_mm.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H +#define _ASM_MICROBLAZE_MMU_CONTEXT_H + +#include <linux/atomic.h> +#include <linux/mm_types.h> +#include <linux/sched.h> + +#include <asm/bitops.h> +#include <asm/mmu.h> +#include <asm-generic/mm_hooks.h> + +# ifdef __KERNEL__ +/* + * This function defines the mapping from contexts to VSIDs (virtual + * segment IDs). We use a skew on both the context and the high 4 bits + * of the 32-bit virtual address (the "effective segment ID") in order + * to spread out the entries in the MMU hash table. + */ +# define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \ + & 0xffffff) + +/* + MicroBlaze has 256 contexts, so we can just rotate through these + as a way of "switching" contexts. If the TID of the TLB is zero, + the PID/TID comparison is disabled, so we can use a TID of zero + to represent all kernel pages as shared among all contexts. + */ + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +# define NO_CONTEXT 256 +# define LAST_CONTEXT 255 +# define FIRST_CONTEXT 1 + +/* + * Set the current MMU context. + * This is done by loading up the segment registers for the user part of the + * address space. + * + * Since the PGD is immediately available, it is much faster to simply + * pass this along as a second parameter, which is required for 8xx and + * can be used for debugging on all processors (if you happen to have + * an Abatron).
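+ * + * For example, switch_mm() further down does exactly that: it calls + * set_context(next->context, next->pgd) once get_mmu_context(next) has + * assigned a context.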
+ */ +extern void set_context(mm_context_t context, pgd_t *pgd); + +/* + * Bitmap of contexts in use. + * The size of this bitmap is LAST_CONTEXT + 1 bits. + */ +extern unsigned long context_map[]; + +/* + * This caches the next context number that we expect to be free. + * Its use is an optimization only, we can't rely on this context + * number to be free, but it usually will be. + */ +extern mm_context_t next_mmu_context; + +/* + * Since we don't have sufficient contexts to give one to every task + * that could be in the system, we need to be able to steal contexts. + * These variables support that. + */ +extern atomic_t nr_free_contexts; +extern struct mm_struct *context_mm[LAST_CONTEXT+1]; +extern void steal_context(void); + +/* + * Get a new mmu context for the address space described by `mm'. + */ +static inline void get_mmu_context(struct mm_struct *mm) +{ + mm_context_t ctx; + + if (mm->context != NO_CONTEXT) + return; + while (atomic_dec_if_positive(&nr_free_contexts) < 0) + steal_context(); + ctx = next_mmu_context; + while (test_and_set_bit(ctx, context_map)) { + ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); + if (ctx > LAST_CONTEXT) + ctx = 0; + } + next_mmu_context = (ctx + 1) & LAST_CONTEXT; + mm->context = ctx; + context_mm[ctx] = mm; +} + +/* + * Set up the context for a new address space. + */ +# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) + +/* + * We're finished using the context for an address space. + */ +static inline void destroy_context(struct mm_struct *mm) +{ + if (mm->context != NO_CONTEXT) { + clear_bit(mm->context, context_map); + mm->context = NO_CONTEXT; + atomic_inc(&nr_free_contexts); + } +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + tsk->thread.pgdir = next->pgd; + get_mmu_context(next); + set_context(next->context, next->pgd); +} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. + */ +static inline void activate_mm(struct mm_struct *active_mm, + struct mm_struct *mm) +{ + current->thread.pgdir = mm->pgd; + get_mmu_context(mm); + set_context(mm->context, mm->pgd); +} + +extern void mmu_context_init(void); + +# endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */ diff --git a/arch/microblaze/include/asm/module.h b/arch/microblaze/include/asm/module.h new file mode 100644 index 000000000..7be1347fc --- /dev/null +++ b/arch/microblaze/include/asm/module.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_MODULE_H +#define _ASM_MICROBLAZE_MODULE_H + +#include <asm-generic/module.h> + +/* Microblaze Relocations */ +#define R_MICROBLAZE_NONE 0 +#define R_MICROBLAZE_32 1 +#define R_MICROBLAZE_32_PCREL 2 +#define R_MICROBLAZE_64_PCREL 3 +#define R_MICROBLAZE_32_PCREL_LO 4 +#define R_MICROBLAZE_64 5 +#define R_MICROBLAZE_32_LO 6 +#define R_MICROBLAZE_SRO32 7 +#define R_MICROBLAZE_SRW32 8 +#define R_MICROBLAZE_64_NONE 9 +#define R_MICROBLAZE_32_SYM_OP_SYM 10 +/* Keep this the last entry. 
*/ +#define R_MICROBLAZE_NUM 11 + +typedef struct { volatile int counter; } module_t; + +#endif /* _ASM_MICROBLAZE_MODULE_H */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h new file mode 100644 index 000000000..d506bb089 --- /dev/null +++ b/arch/microblaze/include/asm/page.h @@ -0,0 +1,212 @@ +/* + * VM ops + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * Changes for MMU support: + * Copyright (C) 2007 Xilinx, Inc. All rights reserved. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_PAGE_H +#define _ASM_MICROBLAZE_PAGE_H + +#include <linux/pfn.h> +#include <asm/setup.h> +#include <asm/asm-compat.h> +#include <linux/const.h> + +#ifdef __KERNEL__ + +/* PAGE_SHIFT determines the page size */ +#if defined(CONFIG_MICROBLAZE_64K_PAGES) +#define PAGE_SHIFT 16 +#elif defined(CONFIG_MICROBLAZE_16K_PAGES) +#define PAGE_SHIFT 14 +#else +#define PAGE_SHIFT 12 +#endif +#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR)) + +#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ + +#ifndef __ASSEMBLY__ + +/* MS be sure that SLAB allocates aligned objects */ +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES + +#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) +#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) + +#ifndef CONFIG_MMU +/* + * PAGE_OFFSET -- the first address of the first page of memory. When not + * using MMU this corresponds to the first free page in physical memory (aligned + * on a page boundary). + */ +extern unsigned int __page_offset; +#define PAGE_OFFSET __page_offset + +#else /* CONFIG_MMU */ + +/* + * PAGE_OFFSET -- the first address of the first page of memory. With MMU + * it is set to the kernel start address (aligned on a page boundary). + * + * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used + * in arch/microblaze/Makefile. + */ +#define PAGE_OFFSET CONFIG_KERNEL_START + +/* + * The basic type of a PTE - 32 bit physical addressing. + */ +typedef unsigned long pte_basic_t; +#define PTE_FMT "%.8lx" + +#endif /* CONFIG_MMU */ + +# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) +# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) + +# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +# define copy_user_page(vto, vfrom, vaddr, topg) \ + memcpy((vto), (vfrom), PAGE_SIZE) + +/* + * These are used to make use of C type-checking.. 
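+ * + * e.g. wrapping the raw value in pte_t means code that mixes up a pte + * with a plain unsigned long fails to compile; the __pte()/pte_val() + * pairs below convert explicitly in each direction.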
+ */ +typedef struct page *pgtable_t; +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pgprot; } pgprot_t; +/* FIXME this can depend on linux kernel version */ +# ifdef CONFIG_MMU +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +# else /* CONFIG_MMU */ +typedef struct { unsigned long ste[64]; } pmd_t; +typedef struct { pmd_t pue[1]; } pud_t; +typedef struct { pud_t p4e[1]; } p4d_t; +typedef struct { p4d_t pge[1]; } pgd_t; +# endif /* CONFIG_MMU */ + +# define pte_val(x) ((x).pte) +# define pgprot_val(x) ((x).pgprot) + +# ifdef CONFIG_MMU +# define pmd_val(x) ((x).pmd) +# define pgd_val(x) ((x).pgd) +# else /* CONFIG_MMU */ +# define pmd_val(x) ((x).ste[0]) +# define pud_val(x) ((x).pue[0]) +# define pgd_val(x) ((x).pge[0]) +# endif /* CONFIG_MMU */ + +# define __pte(x) ((pte_t) { (x) }) +# define __pmd(x) ((pmd_t) { (x) }) +# define __pgd(x) ((pgd_t) { (x) }) +# define __pgprot(x) ((pgprot_t) { (x) }) + +/** + * Conversions for virtual address, physical address, pfn, and struct + * page are defined in the following files. + * + * virt -+ + * | asm-microblaze/page.h + * phys -+ + * | linux/pfn.h + * pfn -+ + * | asm-generic/memory_model.h + * page -+ + * + */ + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; +extern unsigned long max_pfn; + +extern unsigned long memory_start; +extern unsigned long memory_size; +extern unsigned long lowmem_size; + +extern unsigned long kernel_tlb; + +extern int page_is_ram(unsigned long pfn); + +# define phys_to_pfn(phys) (PFN_DOWN(phys)) +# define pfn_to_phys(pfn) (PFN_PHYS(pfn)) + +# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) +# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) + +# ifdef CONFIG_MMU + +# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) +# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) +# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +# else /* CONFIG_MMU */ +# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) +# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) +# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) +# define page_to_bus(page) (page_to_phys(page)) +# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) +# endif /* CONFIG_MMU */ + +# ifndef CONFIG_MMU +# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && \ + ((pfn) <= (min_low_pfn + max_mapnr))) +# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +# else /* CONFIG_MMU */ +# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) +# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) +# endif /* CONFIG_MMU */ + +# endif /* __ASSEMBLY__ */ + +#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) + +# define __pa(x) __virt_to_phys((unsigned long)(x)) +# define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) + +/* Convert between virtual and physical address for MMU. */ +/* Handle MicroBlaze processor with virtual memory. 
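+ * + * Illustrative sketch: with MMU the translation is a constant offset, + * so __virt_to_phys(CONFIG_KERNEL_START + x) yields + * CONFIG_KERNEL_BASE_ADDR + x, and tophys()/tovirt() apply the same + * +/- offset from assembly via addik.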
*/ +#ifndef CONFIG_MMU +#define __virt_to_phys(addr) addr +#define __phys_to_virt(addr) addr +#define tophys(rd, rs) addik rd, rs, 0 +#define tovirt(rd, rs) addik rd, rs, 0 +#else +#define __virt_to_phys(addr) \ + ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) +#define __phys_to_virt(addr) \ + ((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR) +#define tophys(rd, rs) \ + addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) +#define tovirt(rd, rs) \ + addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR) +#endif /* CONFIG_MMU */ + +#define TOPHYS(addr) __virt_to_phys(addr) + +#ifdef CONFIG_MMU + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#endif /* CONFIG_MMU */ + +#endif /* __KERNEL__ */ + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* _ASM_MICROBLAZE_PAGE_H */ diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h new file mode 100644 index 000000000..cb5d39794 --- /dev/null +++ b/arch/microblaze/include/asm/pci-bridge.h @@ -0,0 +1,144 @@ +#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H +#define _ASM_MICROBLAZE_PCI_BRIDGE_H +#ifdef __KERNEL__ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/ioport.h> + +struct device_node; + +#ifdef CONFIG_PCI +extern struct list_head hose_list; +extern int pcibios_vaddr_is_ioport(void __iomem *address); +#else +static inline int pcibios_vaddr_is_ioport(void __iomem *address) +{ + return 0; +} +#endif + +/* + * Structure of a PCI controller (host bridge) + */ +struct pci_controller { + struct pci_bus *bus; + char is_dynamic; + struct device_node *dn; + struct list_head list_node; + struct device *parent; + + int first_busno; + int last_busno; + + int self_busno; + + void __iomem *io_base_virt; + resource_size_t io_base_phys; + + resource_size_t pci_io_size; + + /* Some machines (PReP) have a non 1:1 mapping of + * the PCI memory space in the CPU bus space + */ + resource_size_t pci_mem_offset; + + /* Some machines have a special region to forward the ISA + * "memory" cycles such as VGA memory regions. Left to 0 + * if unsupported + */ + resource_size_t isa_mem_phys; + resource_size_t isa_mem_size; + + struct pci_ops *ops; + unsigned int __iomem *cfg_addr; + void __iomem *cfg_data; + + /* + * Used for variants of PCI indirect handling and possible quirks: + * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 + * EXT_REG - provides access to PCI-e extended registers + * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS + * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS + * to determine which bus number to match on when generating type0 + * config cycles + * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with + * hanging if we don't have link and try to do config cycles to + * anything but the PHB. Only allow talking to the PHB if this is + * set. + * BIG_ENDIAN - cfg_addr is a big endian register + * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs + * on the PLB4. Effectively disable MRM commands by setting this. 
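+ * + * A quirk is then tested with a simple mask, e.g. + * (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) in the config-cycle + * code (illustrative; the accessors live elsewhere).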
+ */ +#define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 +#define INDIRECT_TYPE_EXT_REG 0x00000002 +#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004 +#define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 +#define INDIRECT_TYPE_BIG_ENDIAN 0x00000010 +#define INDIRECT_TYPE_BROKEN_MRM 0x00000020 + u32 indirect_type; + + /* Currently, we limit ourselves to 1 IO range and 3 mem + * ranges since the common pci_bus structure can't handle more + */ + struct resource io_resource; + struct resource mem_resources[3]; + int global_number; /* PCI domain number */ +}; + +#ifdef CONFIG_PCI +static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) +{ + return bus->sysdata; +} + +static inline int isa_vaddr_is_ioport(void __iomem *address) +{ + /* No specific ISA handling on ppc32 at this stage, it + * all goes through PCI + */ + return 0; +} +#endif /* CONFIG_PCI */ + +/* These are used for config access before all the PCI probing + has been done. */ +extern int early_read_config_byte(struct pci_controller *hose, int bus, + int dev_fn, int where, u8 *val); +extern int early_read_config_word(struct pci_controller *hose, int bus, + int dev_fn, int where, u16 *val); +extern int early_read_config_dword(struct pci_controller *hose, int bus, + int dev_fn, int where, u32 *val); +extern int early_write_config_byte(struct pci_controller *hose, int bus, + int dev_fn, int where, u8 val); +extern int early_write_config_word(struct pci_controller *hose, int bus, + int dev_fn, int where, u16 val); +extern int early_write_config_dword(struct pci_controller *hose, int bus, + int dev_fn, int where, u32 val); + +extern int early_find_capability(struct pci_controller *hose, int bus, + int dev_fn, int cap); + +extern void setup_indirect_pci(struct pci_controller *hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags); + +/* Get the PCI host controller for an OF device */ +extern struct pci_controller *pci_find_hose_for_OF_device( + struct device_node *node); + +/* Fill up host controller resources from the OF node */ +extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, + struct device_node *dev, int primary); + +/* Allocate & free a PCI host bridge structure */ +extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); +extern void pcibios_free_controller(struct pci_controller *phb); + +#endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */ diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h new file mode 100644 index 000000000..859c19828 --- /dev/null +++ b/arch/microblaze/include/asm/pci.h @@ -0,0 +1,82 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Based on powerpc version + */ + +#ifndef __ASM_MICROBLAZE_PCI_H +#define __ASM_MICROBLAZE_PCI_H +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/scatterlist.h> + +#include <asm/io.h> +#include <asm/pci-bridge.h> + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +/* Values for the `which' argument to sys_pciconfig_iobase syscall. 
*/ +#define IOBASE_BRIDGE_NUMBER 0 +#define IOBASE_MEMORY 1 +#define IOBASE_IO 2 +#define IOBASE_ISA_IO 3 +#define IOBASE_ISA_MEM 4 + +#define pcibios_scan_all_fns(a, b) 0 + +/* + * Set this to 1 if you want the kernel to re-assign all PCI + * bus numbers (don't do that on ppc64 yet !) + */ +#define pcibios_assign_all_busses() 0 + +extern int pci_domain_nr(struct pci_bus *bus); + +/* Decide whether to display the domain number in /proc */ +extern int pci_proc_domain(struct pci_bus *bus); + +struct vm_area_struct; + +/* Tell PCI code what kind of PCI resource mappings we support */ +#define HAVE_PCI_MMAP 1 +#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 +#define arch_can_pci_mmap_io() 1 + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); + +#define HAVE_PCI_LEGACY 1 + +extern void pcibios_resource_survey(void); + +struct file; +extern pgprot_t pci_phys_mem_access_prot(struct file *file, + unsigned long pfn, + unsigned long size, + pgprot_t prot); + +#define HAVE_ARCH_PCI_RESOURCE_TO_USER + +/* This part of code was originally in xilinx-pci.h */ +#ifdef CONFIG_PCI_XILINX +extern void __init xilinx_pci_init(void); +#else +static inline void __init xilinx_pci_init(void) { return; } +#endif + +#endif /* __KERNEL__ */ +#endif /* __ASM_MICROBLAZE_PCI_H */ diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h new file mode 100644 index 000000000..7c89390c0 --- /dev/null +++ b/arch/microblaze/include/asm/pgalloc.h @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_PGALLOC_H +#define _ASM_MICROBLAZE_PGALLOC_H + +#ifdef CONFIG_MMU + +#include <linux/kernel.h> /* For min/max macros */ +#include <linux/highmem.h> +#include <asm/setup.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/pgtable.h> + +#define PGDIR_ORDER 0 + +/* + * This is handled very differently on MicroBlaze since our page tables + * are all 0's and I want to be able to use these zero'd pages elsewhere + * as well - it gives us quite a speedup.
+ * -- Cort + */ +extern struct pgtable_cache_struct { + unsigned long *pgd_cache; + unsigned long *pte_cache; + unsigned long pgtable_cache_sz; +} quicklists; + +#define pgd_quicklist (quicklists.pgd_cache) +#define pmd_quicklist ((unsigned long *)0) +#define pte_quicklist (quicklists.pte_cache) +#define pgtable_cache_size (quicklists.pgtable_cache_sz) + +extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */ +extern atomic_t zero_sz; /* # currently pre-zero'd pages */ +extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */ +extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */ +extern atomic_t zerototal; /* # pages zero'd over time */ + +#define zero_quicklist (zero_cache) +#define zero_cache_sz (zero_sz) +#define zero_cache_calls (zeropage_calls) +#define zero_cache_hits (zeropage_hits) +#define zero_cache_total (zerototal) + +/* + * return a pre-zero'd page from the list, + * return NULL if none available -- Cort + */ +extern unsigned long get_zero_page_fast(void); + +extern void __bad_pte(pmd_t *pmd); + +static inline pgd_t *get_pgd_slow(void) +{ + pgd_t *ret; + + ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER); + if (ret != NULL) + clear_page(ret); + return ret; +} + +static inline pgd_t *get_pgd_fast(void) +{ + unsigned long *ret; + + ret = pgd_quicklist; + if (ret != NULL) { + pgd_quicklist = (unsigned long *)(*ret); + ret[0] = 0; + pgtable_cache_size--; + } else + ret = (unsigned long *)get_pgd_slow(); + return (pgd_t *)ret; +} + +static inline void free_pgd_fast(pgd_t *pgd) +{ + *(unsigned long **)pgd = pgd_quicklist; + pgd_quicklist = (unsigned long *) pgd; + pgtable_cache_size++; +} + +static inline void free_pgd_slow(pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} + +#define pgd_free(mm, pgd) free_pgd_fast(pgd) +#define pgd_alloc(mm) get_pgd_fast() + +#define pmd_pgtable(pmd) pmd_page(pmd) + +/* + * We don't have any real pmd's, and this code never triggers because + * the pgd will always be present.. 
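+ * + * (With the folded two-level layout, pmd_populate() below just stores + * the pte page's address straight into the single pgd/pmd slot, so a + * separate pmd allocation would never be reached.)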
+ */ +#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) +#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) + +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *ptepage; + +#ifdef CONFIG_HIGHPTE + int flags = GFP_KERNEL | __GFP_HIGHMEM; +#else + int flags = GFP_KERNEL; +#endif + + ptepage = alloc_pages(flags, 0); + if (!ptepage) + return NULL; + clear_highpage(ptepage); + if (!pgtable_page_ctor(ptepage)) { + __free_page(ptepage); + return NULL; + } + return ptepage; +} + +static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, + unsigned long address) +{ + unsigned long *ret; + + ret = pte_quicklist; + if (ret != NULL) { + pte_quicklist = (unsigned long *)(*ret); + ret[0] = 0; + pgtable_cache_size--; + } + return (pte_t *)ret; +} + +static inline void pte_free_fast(pte_t *pte) +{ + *(unsigned long **)pte = pte_quicklist; + pte_quicklist = (unsigned long *) pte; + pgtable_cache_size++; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free_slow(struct page *ptepage) +{ + __free_page(ptepage); +} + +static inline void pte_free(struct mm_struct *mm, struct page *ptepage) +{ + pgtable_page_dtor(ptepage); + __free_page(ptepage); +} + +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) + +#define pmd_populate(mm, pmd, pte) \ + (pmd_val(*(pmd)) = (unsigned long)page_address(pte)) + +#define pmd_populate_kernel(mm, pmd, pte) \ + (pmd_val(*(pmd)) = (unsigned long) (pte)) + +/* + * We don't have any real pmd's, and this code never triggers because + * the pgd will always be present.. + */ +#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) +#define pmd_free(mm, x) do { } while (0) +#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) +#define pgd_populate(mm, pmd, pte) BUG() + +extern int do_check_pgt_cache(int, int); + +#endif /* CONFIG_MMU */ + +#define check_pgt_cache() do { } while (0) + +#endif /* _ASM_MICROBLAZE_PGALLOC_H */ diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h new file mode 100644 index 000000000..2ca598534 --- /dev/null +++ b/arch/microblaze/include/asm/pgtable.h @@ -0,0 +1,561 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_PGTABLE_H +#define _ASM_MICROBLAZE_PGTABLE_H + +#include <asm/setup.h> + +#ifndef __ASSEMBLY__ +extern int mem_init_done; +#endif + +#ifndef CONFIG_MMU + +#define pgd_present(pgd) (1) /* pages are always present on non MMU */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) +#define kern_addr_valid(addr) (1) +#define pmd_offset(a, b) ((void *) 0) + +#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ + +#define pgprot_noncached(x) (x) +#define pgprot_writecombine pgprot_noncached +#define pgprot_device pgprot_noncached + +#define __swp_type(x) (0) +#define __swp_offset(x) (0) +#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) + +#define swapper_pg_dir ((pgd_t *) NULL) + +#define pgtable_cache_init() do {} while (0) + +#define arch_enter_lazy_cpu_mode() do {} while (0) + +#define pgprot_noncached_wc(prot) prot + +/* + * All 32bit addresses are effectively valid for vmalloc... + * Sort of meaningless for non-VM targets. + */ +#define VMALLOC_START 0 +#define VMALLOC_END 0xffffffff + +#else /* CONFIG_MMU */ + +#include <asm-generic/4level-fixup.h> + +#define __PAGETABLE_PMD_FOLDED 1 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include <linux/sched.h> +#include <linux/threads.h> +#include <asm/processor.h> /* For TASK_SIZE */ +#include <asm/mmu.h> +#include <asm/page.h> + +#define FIRST_USER_ADDRESS 0UL + +extern unsigned long va_to_phys(unsigned long address); +extern pte_t *va_to_pte(unsigned long address); + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ + +static inline int pte_special(pte_t pte) { return 0; } + +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +/* Start and end of the vmalloc area. */ +/* Make sure to map the vmalloc area above the pinned kernel memory area + of 32Mb. */ +#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE) +#define VMALLOC_END ioremap_bot + +#endif /* __ASSEMBLY__ */ + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) \ + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) \ + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +/* + * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash + * table containing PTEs, together with a set of 16 segment registers, to + * define the virtual to physical address mapping. + * + * We use the hash table as an extended TLB, i.e. a cache of currently + * active mappings. We maintain a two-level page table tree, much + * like that used by the i386, for the sake of the Linux memory + * management code. Low-level assembler code in hashtable.S + * (procedure hash_page) is responsible for extracting ptes from the + * tree and putting them into the hash table when necessary, and + * updating the accessed and modified bits in the page table tree. 
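+ * + * As a worked sketch with 4k pages (PTE_SHIFT == 10, so PGDIR_SHIFT == + * 22 below): virtual address 0xC0123456 resolves with pgd_index() == + * 0x300 (top 10 bits) and pte_index() == 0x123 (next 10 bits).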
+ */ + +/* + * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The + * instruction and data sides share a unified, 64-entry, semi-associative + * TLB which is maintained totally under software control. In addition, the + * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative + * TLB which serves as a first level to the shared TLB. These two TLBs are + * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions). + */ + +/* + * The normal case is that PTEs are 32-bits and we have a 1-page + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus + * + */ + +/* PMD_SHIFT determines the size of the area mapped by the PTE pages */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* PGDIR_SHIFT determines what a top-level page table entry can map */ +#define PGDIR_SHIFT PMD_SHIFT +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * entries per page directory level: our page-table tree is two-level, so + * we don't really have any PMD directory. + */ +#define PTRS_PER_PTE (1 << PTE_SHIFT) +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#define FIRST_USER_PGD_NR 0 + +#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) +#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \ + __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ + __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ + __FILE__, __LINE__, pgd_val(e)) + +/* + * Bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PTE as closely as possible. + */ + +/* There are several potential gotchas here. The hardware TLBLO + * field looks like this: + * + * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * RPN..................... 0 0 EX WR ZSEL....... W I M G + * + * Where possible we make the Linux PTE bits match up with this. + * + * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can + * support down to 1k pages), this is done in the TLBMiss exception + * handler. + * - We use only zones 0 (for kernel pages) and 1 (for user pages) + * of the 16 available. Bits 24-26 of the TLB are cleared in the TLB + * miss handler. Bit 27 is PAGE_USER, thus selecting the correct + * zone. + * - PRESENT *must* be in the bottom two bits because swap cache + * entries use the top 30 bits. Because 4xx doesn't support SMP + * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 + * is cleared in the TLB miss handler before the TLB entry is loaded. + * - All other bits of the PTE are loaded into TLBLO without + * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for + * software PTE bits. We actually use bits 21, 24, 25, and + * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and + * PRESENT. + */ + +/* Definitions for MicroBlaze.
*/ +#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ +#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ +#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ +#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ +#define _PAGE_USER 0x010 /* matches one of the zone permission bits */ +#define _PAGE_RW 0x040 /* software: Writes permitted */ +#define _PAGE_DIRTY 0x080 /* software: dirty page */ +#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ +#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ +#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ +#define _PMD_PRESENT PAGE_MASK + +/* + * Some bits are unused... + */ +#ifndef _PAGE_HASHPTE +#define _PAGE_HASHPTE 0 +#endif +#ifndef _PTE_NONE_MASK +#define _PTE_NONE_MASK 0 +#endif +#ifndef _PAGE_SHARED +#define _PAGE_SHARED 0 +#endif +#ifndef _PAGE_EXEC +#define _PAGE_EXEC 0 +#endif + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) + +/* + * Note: the _PAGE_COHERENT bit automatically gets set in the hardware + * PTE if CONFIG_SMP is defined (hash_page does this); there is no need + * to have it in the Linux PTE, and in fact the bit could be reused for + * another purpose. -- paulus. + */ +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) + +#define _PAGE_KERNEL \ + (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC) + +#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) + +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X \ + __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED) +#define PAGE_KERNEL_CI __pgprot(_PAGE_IO) + +/* + * We consider execute permission the same as read. + * Also, write permissions imply read permissions. + */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY_X +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY_X +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY_X +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED_X +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED_X + +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
+ */ +extern unsigned long empty_zero_page[1024]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +#endif /* __ASSEMBLY__ */ + +#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define pte_clear(mm, addr, ptep) \ + do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0) +#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0) +#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) + +#define pte_page(x) (mem_map + (unsigned long) \ + ((pte_val(x) - memory_start) >> PAGE_SHIFT)) +#define PFN_SHIFT_OFFSET (PAGE_SHIFT) + +#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) + +#define pfn_pte(pfn, prot) \ + __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot)) + +#ifndef __ASSEMBLY__ +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +#define pgd_clear(xp) do { } while (0) +#define pgd_page(pgd) \ + ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } +static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } + +static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } +static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } + +static inline pte_t pte_rdprotect(pte_t pte) \ + { pte_val(pte) &= ~_PAGE_USER; return pte; } +static inline pte_t pte_wrprotect(pte_t pte) \ + { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } +static inline pte_t pte_exprotect(pte_t pte) \ + { pte_val(pte) &= ~_PAGE_EXEC; return pte; } +static inline pte_t pte_mkclean(pte_t pte) \ + { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } +static inline pte_t pte_mkold(pte_t pte) \ + { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } + +static inline pte_t pte_mkread(pte_t pte) \ + { pte_val(pte) |= _PAGE_USER; return pte; } +static inline pte_t pte_mkexec(pte_t pte) \ + { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } +static inline pte_t pte_mkwrite(pte_t pte) \ + { pte_val(pte) |= _PAGE_RW; return pte; } +static inline pte_t pte_mkdirty(pte_t pte) \ + { pte_val(pte) |= _PAGE_DIRTY; return pte; } +static inline pte_t pte_mkyoung(pte_t pte) \ + { pte_val(pte) |= _PAGE_ACCESSED; return pte; } + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
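+ * + * e.g. (hypothetical physical address): mk_pte_phys(0x48001000, + * PAGE_KERNEL) below just ORs the frame address with the protection + * bits, giving a pte whose value is 0x48001000 | _PAGE_KERNEL.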
+ */ + +static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot) +{ + pte_t pte; + pte_val(pte) = physpage | pgprot_val(pgprot); + return pte; +} + +#define mk_pte(page, pgprot) \ +({ \ + pte_t pte; \ + pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \ + pgprot_val(pgprot); \ + pte; \ +}) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +/* + * Atomic PTE updates. + * + * pte_update clears and sets bit atomically, and returns + * the old pte value. + * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant + * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits. + */ +static inline unsigned long pte_update(pte_t *p, unsigned long clr, + unsigned long set) +{ + unsigned long flags, old, tmp; + + raw_local_irq_save(flags); + + __asm__ __volatile__( "lw %0, %2, r0 \n" + "andn %1, %0, %3 \n" + "or %1, %1, %4 \n" + "sw %1, %2, r0 \n" + : "=&r" (old), "=&r" (tmp) + : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set) + : "cc"); + + raw_local_irq_restore(flags); + + return old; +} + +/* + * set_pte stores a linux PTE into the linux page table. + */ +static inline void set_pte(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + *ptep = pte; +} + +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + *ptep = pte; +} + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0; +} + +static inline int ptep_test_and_clear_dirty(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return (pte_update(ptep, \ + (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); +} + +/*static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); +}*/ + +static inline void ptep_mkdirty(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_update(ptep, 0, _PAGE_DIRTY); +} + +/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/ + +/* Convert pmd entry to page */ +/* our pmd entry is an effective address of pte table*/ +/* returns effective address of the pmd entry*/ +#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) + +/* returns struct *page of the pmd entry*/ +#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* to find an entry in a page-table-directory */ +#define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +/* Find an entry in the second-level page table.. */ +static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) +{ + return (pmd_t *) dir; +} + +/* Find an entry in the third-level page table.. 
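+ * + * A full walk for an address then looks like this sketch (all helpers + * as defined in this header): + * + *	pgd_t *pgd = pgd_offset(mm, addr); + *	pmd_t *pmd = pmd_offset(pgd, addr); + *	pte_t *pte = pte_offset_kernel(pmd, addr);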
*/ +#define pte_index(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, addr) \ + ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr)) +#define pte_offset_map(dir, addr) \ + ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) + +#define pte_unmap(pte) kunmap_atomic(pte) + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* + * Encode and decode a swap entry. + * Note that the bits we use in a PTE for representing a swap entry + * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit + * (if used). -- paulus + */ +#define __swp_type(entry) ((entry).val & 0x3f) +#define __swp_offset(entry) ((entry).val >> 6) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { (type) | ((offset) << 6) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 }) + +extern unsigned long iopa(unsigned long addr); + +/* Values for nocacheflag and cmode */ +/* These are not used by the APUS kernel_map, but prevents + * compilation errors. + */ +#define IOMAP_FULL_CACHING 0 +#define IOMAP_NOCACHE_SER 1 +#define IOMAP_NOCACHE_NONSER 2 +#define IOMAP_NO_COPYBACK 3 + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +#define kern_addr_valid(addr) (1) + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +void do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code); + +void mapin_ram(void); +int map_page(unsigned long va, phys_addr_t pa, int flags); + +extern int mem_init_done; + +asmlinkage void __init mmu_init(void); + +void __init *early_get_page(void); + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* CONFIG_MMU */ + +#ifndef __ASSEMBLY__ +#include <asm-generic/pgtable.h> + +extern unsigned long ioremap_bot, ioremap_base; + +unsigned long consistent_virt_to_pfn(void *vaddr); + +void setup_memory(void); +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_MICROBLAZE_PGTABLE_H */ diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h new file mode 100644 index 000000000..330d55686 --- /dev/null +++ b/arch/microblaze/include/asm/processor.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_PROCESSOR_H +#define _ASM_MICROBLAZE_PROCESSOR_H + +#include <asm/ptrace.h> +#include <asm/setup.h> +#include <asm/registers.h> +#include <asm/entry.h> +#include <asm/current.h> + +# ifndef __ASSEMBLY__ +/* from kernel/cpu/mb.c */ +extern const struct seq_operations cpuinfo_op; + +# define cpu_relax() barrier() + +#define task_pt_regs(tsk) \ + (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) + +/* Do necessary setup to start up a newly executed thread. */ +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp); + +extern void ret_from_fork(void); +extern void ret_from_kernel_thread(void); + +# endif /* __ASSEMBLY__ */ + +# ifndef CONFIG_MMU +/* + * User space process size: memory size + * + * TASK_SIZE on MMU cpu is usually 1GB. However, on no-MMU arch, both + * user processes and the kernel is on the same memory region. 
They + * both share the memory space, which is limited by the amount of + * physical memory. Thus, we set TASK_SIZE == amount of total memory. + */ +# define TASK_SIZE (0x81000000 - 0x80000000) + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +# define current_text_addr() ({ __label__ _l; _l: &&_l; }) + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. We won't be using it. + */ +# define TASK_UNMAPPED_BASE 0 + +/* definition in include/linux/sched.h */ +struct task_struct; + +/* thread_struct is gone. Use thread_info instead. */ +struct thread_struct { }; +# define INIT_THREAD { } + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +extern unsigned long get_wchan(struct task_struct *p); + +# define KSTK_EIP(tsk) (0) +# define KSTK_ESP(tsk) (0) + +# else /* CONFIG_MMU */ + +/* + * This is used to define STACK_TOP, and with MMU it must be below + * kernel base to select the correct PGD when handling MMU exceptions. + */ +# define TASK_SIZE (CONFIG_KERNEL_START) + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +# define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) + +# define THREAD_KSP 0 + +# ifndef __ASSEMBLY__ + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +# define current_text_addr() ({ __label__ _l; _l: &&_l; }) + +/* If you change this, you must change the associated assembly-language + * constants defined below, THREAD_*. + */ +struct thread_struct { + /* kernel stack pointer (must be first field in structure) */ + unsigned long ksp; + unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ + void *pgdir; /* root of page-table tree */ + struct pt_regs *regs; /* Pointer to saved register state */ +}; + +# define INIT_THREAD { \ + .ksp = sizeof init_stack + (unsigned long)init_stack, \ + .pgdir = swapper_pg_dir, \ +} + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +unsigned long get_wchan(struct task_struct *p); + +/* The size allocated for kernel stacks. This _must_ be a power of two! */ +# define KERNEL_STACK_SIZE 0x2000 + +/* Return some info about the user process TASK. */ +# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE) +# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) + +# define task_pt_regs_plus_args(tsk) \ + ((void *)task_pt_regs(tsk)) + +# define task_sp(task) (task_regs(task)->r1) +# define task_pc(task) (task_regs(task)->pc) +/* Grotty old names for some. */ +# define KSTK_EIP(task) (task_pc(task)) +# define KSTK_ESP(task) (task_sp(task)) + +/* FIXME */ +# define deactivate_mm(tsk, mm) do { } while (0) + +# define STACK_TOP TASK_SIZE +# define STACK_TOP_MAX STACK_TOP + +#ifdef CONFIG_DEBUG_FS +extern struct dentry *of_debugfs_root; +#endif + +# endif /* __ASSEMBLY__ */ +# endif /* CONFIG_MMU */ +#endif /* _ASM_MICROBLAZE_PROCESSOR_H */ diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h new file mode 100644 index 000000000..5b18ec124 --- /dev/null +++ b/arch/microblaze/include/asm/ptrace.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef _ASM_MICROBLAZE_PTRACE_H +#define _ASM_MICROBLAZE_PTRACE_H + +#include <uapi/asm/ptrace.h> + +#ifndef __ASSEMBLY__ +#define kernel_mode(regs) ((regs)->pt_mode) +#define user_mode(regs) (!kernel_mode(regs)) + +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(regs) ((regs)->r1) + +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->r3; +} + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_MICROBLAZE_PTRACE_H */ diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h new file mode 100644 index 000000000..4bbdb4c03 --- /dev/null +++ b/arch/microblaze/include/asm/pvr.h @@ -0,0 +1,227 @@ +/* + * Support for the MicroBlaze PVR (Processor Version Register) + * + * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * Copyright (C) 2007 - 2011 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#ifndef _ASM_MICROBLAZE_PVR_H +#define _ASM_MICROBLAZE_PVR_H + +#define PVR_MSR_BIT 0x400 + +struct pvr_s { + unsigned pvr[12]; +}; + +/* The following taken from Xilinx's standalone BSP pvr.h */ + +/* Basic PVR mask */ +#define PVR0_PVR_FULL_MASK 0x80000000 +#define PVR0_USE_BARREL_MASK 0x40000000 +#define PVR0_USE_DIV_MASK 0x20000000 +#define PVR0_USE_HW_MUL_MASK 0x10000000 +#define PVR0_USE_FPU_MASK 0x08000000 +#define PVR0_USE_EXC_MASK 0x04000000 +#define PVR0_USE_ICACHE_MASK 0x02000000 +#define PVR0_USE_DCACHE_MASK 0x01000000 +#define PVR0_USE_MMU 0x00800000 +#define PVR0_USE_BTC 0x00400000 +#define PVR0_ENDI 0x00200000 +#define PVR0_VERSION_MASK 0x0000FF00 +#define PVR0_USER1_MASK 0x000000FF + +/* User 2 PVR mask */ +#define PVR1_USER2_MASK 0xFFFFFFFF + +/* Configuration PVR masks */ +#define PVR2_D_OPB_MASK 0x80000000 /* or AXI */ +#define PVR2_D_LMB_MASK 0x40000000 +#define PVR2_I_OPB_MASK 0x20000000 /* or AXI */ +#define PVR2_I_LMB_MASK 0x10000000 +#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000 +#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000 +#define PVR2_D_PLB_MASK 0x02000000 /* new */ +#define PVR2_I_PLB_MASK 0x01000000 /* new */ +#define PVR2_INTERCONNECT 0x00800000 /* new */ +#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */ +#define PVR2_USE_FSL_EXC 0x00040000 /* new */ +#define PVR2_USE_MSR_INSTR 0x00020000 +#define PVR2_USE_PCMP_INSTR 0x00010000 +#define PVR2_AREA_OPTIMISED 0x00008000 +#define PVR2_USE_BARREL_MASK 0x00004000 +#define PVR2_USE_DIV_MASK 0x00002000 +#define PVR2_USE_HW_MUL_MASK 0x00001000 +#define PVR2_USE_FPU_MASK 0x00000800 +#define PVR2_USE_MUL64_MASK 0x00000400 +#define PVR2_USE_FPU2_MASK 0x00000200 /* new */ +#define PVR2_USE_IPLBEXC 0x00000100 +#define PVR2_USE_DPLBEXC 0x00000080 +#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040 +#define PVR2_UNALIGNED_EXC_MASK 0x00000020 +#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010 +#define PVR2_IOPB_BUS_EXC_MASK 0x00000008 /* or AXI */ +#define PVR2_DOPB_BUS_EXC_MASK 0x00000004 /* or AXI */ +#define PVR2_DIV_ZERO_EXC_MASK 0x00000002 +#define PVR2_FPU_EXC_MASK 0x00000001 + +/* Debug and exception PVR masks */ +#define PVR3_DEBUG_ENABLED_MASK 0x80000000 +#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000 +#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000 +#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000 +#define PVR3_FSL_LINKS_MASK 0x00000380 + +/* ICache config PVR masks */ +#define 
PVR4_USE_ICACHE_MASK 0x80000000 /* ICU */ +#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* ICTS */ +#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 /* ICW */ +#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 /* ICLL */ +#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 /* ICBS */ +#define PVR4_ICACHE_ALWAYS_USED 0x00008000 /* IAU */ +#define PVR4_ICACHE_INTERFACE 0x00002000 /* ICI */ + +/* DCache config PVR masks */ +#define PVR5_USE_DCACHE_MASK 0x80000000 /* DCU */ +#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* DCTS */ +#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 /* DCW */ +#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 /* DCLL */ +#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 /* DCBS */ +#define PVR5_DCACHE_ALWAYS_USED 0x00008000 /* DAU */ +#define PVR5_DCACHE_USE_WRITEBACK 0x00004000 /* DWB */ +#define PVR5_DCACHE_INTERFACE 0x00002000 /* DCI */ + +/* ICache base address PVR mask */ +#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF + +/* ICache high address PVR mask */ +#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF + +/* DCache base address PVR mask */ +#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF + +/* DCache high address PVR mask */ +#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF + +/* Target family PVR mask */ +#define PVR10_TARGET_FAMILY_MASK 0xFF000000 + +/* MMU description */ +#define PVR11_USE_MMU 0xC0000000 +#define PVR11_MMU_ITLB_SIZE 0x38000000 +#define PVR11_MMU_DTLB_SIZE 0x07000000 +#define PVR11_MMU_TLB_ACCESS 0x00C00000 +#define PVR11_MMU_ZONES 0x003C0000 +#define PVR11_MMU_PRIVINS 0x00010000 +/* MSR Reset value PVR mask */ +#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF + +/* PVR access macros */ +#define PVR_IS_FULL(_pvr) (_pvr.pvr[0] & PVR0_PVR_FULL_MASK) +#define PVR_USE_BARREL(_pvr) (_pvr.pvr[0] & PVR0_USE_BARREL_MASK) +#define PVR_USE_DIV(_pvr) (_pvr.pvr[0] & PVR0_USE_DIV_MASK) +#define PVR_USE_HW_MUL(_pvr) (_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK) +#define PVR_USE_FPU(_pvr) (_pvr.pvr[0] & PVR0_USE_FPU_MASK) +#define PVR_USE_FPU2(_pvr) (_pvr.pvr[2] & PVR2_USE_FPU2_MASK) +#define PVR_USE_ICACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_ICACHE_MASK) +#define PVR_USE_DCACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_DCACHE_MASK) +#define PVR_VERSION(_pvr) ((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8) +#define PVR_USER1(_pvr) (_pvr.pvr[0] & PVR0_USER1_MASK) +#define PVR_USER2(_pvr) (_pvr.pvr[1] & PVR1_USER2_MASK) + +#define PVR_D_OPB(_pvr) (_pvr.pvr[2] & PVR2_D_OPB_MASK) +#define PVR_D_LMB(_pvr) (_pvr.pvr[2] & PVR2_D_LMB_MASK) +#define PVR_I_OPB(_pvr) (_pvr.pvr[2] & PVR2_I_OPB_MASK) +#define PVR_I_LMB(_pvr) (_pvr.pvr[2] & PVR2_I_LMB_MASK) +#define PVR_INTERRUPT_IS_EDGE(_pvr) \ + (_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK) +#define PVR_EDGE_IS_POSITIVE(_pvr) \ + (_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK) +#define PVR_USE_MSR_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_MSR_INSTR) +#define PVR_USE_PCMP_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_PCMP_INSTR) +#define PVR_AREA_OPTIMISED(_pvr) (_pvr.pvr[2] & PVR2_AREA_OPTIMISED) +#define PVR_USE_MUL64(_pvr) (_pvr.pvr[2] & PVR2_USE_MUL64_MASK) +#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \ + (_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK) +#define PVR_UNALIGNED_EXCEPTION(_pvr) \ + (_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK) +#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \ + (_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK) +#define PVR_IOPB_BUS_EXCEPTION(_pvr) \ + (_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK) +#define PVR_DOPB_BUS_EXCEPTION(_pvr) \ + (_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK) +#define PVR_DIV_ZERO_EXCEPTION(_pvr) \ + (_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK) +#define PVR_FPU_EXCEPTION(_pvr) (_pvr.pvr[2] & 
PVR2_FPU_EXC_MASK) +#define PVR_FSL_EXCEPTION(_pvr) (_pvr.pvr[2] & PVR2_USE_EXTEND_FSL) + +#define PVR_DEBUG_ENABLED(_pvr) (_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK) +#define PVR_NUMBER_OF_PC_BRK(_pvr) \ + ((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25) +#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \ + ((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19) +#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \ + ((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13) +#define PVR_FSL_LINKS(_pvr) ((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7) + +#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \ + ((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26) +#define PVR_ICACHE_USE_FSL(_pvr) \ + (_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK) +#define PVR_ICACHE_ALLOW_WR(_pvr) \ + (_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK) +#define PVR_ICACHE_LINE_LEN(_pvr) \ + (1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21)) +#define PVR_ICACHE_BYTE_SIZE(_pvr) \ + (1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16)) + +#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \ + ((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26) +#define PVR_DCACHE_USE_FSL(_pvr) (_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK) +#define PVR_DCACHE_ALLOW_WR(_pvr) \ + (_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK) +/* FIXME two shifts on one line needs any comment */ +#define PVR_DCACHE_LINE_LEN(_pvr) \ + (1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21)) +#define PVR_DCACHE_BYTE_SIZE(_pvr) \ + (1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16)) + +#define PVR_DCACHE_USE_WRITEBACK(_pvr) \ + ((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14) + +#define PVR_ICACHE_BASEADDR(_pvr) \ + (_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK) +#define PVR_ICACHE_HIGHADDR(_pvr) \ + (_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK) +#define PVR_DCACHE_BASEADDR(_pvr) \ + (_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK) +#define PVR_DCACHE_HIGHADDR(_pvr) \ + (_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK) + +#define PVR_TARGET_FAMILY(_pvr) \ + ((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24) + +#define PVR_MSR_RESET_VALUE(_pvr) \ + (_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK) + +/* mmu */ +#define PVR_USE_MMU(_pvr) ((_pvr.pvr[11] & PVR11_USE_MMU) >> 30) +#define PVR_MMU_ITLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE) +#define PVR_MMU_DTLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE) +#define PVR_MMU_TLB_ACCESS(_pvr) (_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS) +#define PVR_MMU_ZONES(_pvr) (_pvr.pvr[11] & PVR11_MMU_ZONES) +#define PVR_MMU_PRIVINS(pvr) (pvr.pvr[11] & PVR11_MMU_PRIVINS) + +/* endian */ +#define PVR_ENDIAN(_pvr) (_pvr.pvr[0] & PVR0_ENDI) + +int cpu_has_pvr(void); +void get_pvr(struct pvr_s *pvr); + +#endif /* _ASM_MICROBLAZE_PVR_H */ diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h new file mode 100644 index 000000000..68c3afb73 --- /dev/null +++ b/arch/microblaze/include/asm/registers.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_REGISTERS_H +#define _ASM_MICROBLAZE_REGISTERS_H + +#define MSR_BE (1<<0) /* 0x001 */ +#define MSR_IE (1<<1) /* 0x002 */ +#define MSR_C (1<<2) /* 0x004 */ +#define MSR_BIP (1<<3) /* 0x008 */ +#define MSR_FSL (1<<4) /* 0x010 */ +#define MSR_ICE (1<<5) /* 0x020 */ +#define MSR_DZ (1<<6) /* 0x040 */ +#define MSR_DCE (1<<7) /* 0x080 */ +#define MSR_EE (1<<8) /* 0x100 */ +#define MSR_EIP (1<<9) /* 0x200 */ +#define MSR_CC (1<<31) + +/* Floating Point Status Register (FSR) Bits */ +#define FSR_IO (1<<4) /* Invalid operation */ +#define FSR_DZ (1<<3) /* Divide-by-zero */ +#define FSR_OF (1<<2) /* Overflow */ +#define FSR_UF (1<<1) /* Underflow */ +#define FSR_DO (1<<0) /* Denormalized operand error */ + +# ifdef CONFIG_MMU +/* Machine State Register (MSR) Fields */ +# define MSR_UM (1<<11) /* User Mode */ +# define MSR_UMS (1<<12) /* User Mode Save */ +# define MSR_VM (1<<13) /* Virtual Mode */ +# define MSR_VMS (1<<14) /* Virtual Mode Save */ + +# define MSR_KERNEL (MSR_EE | MSR_VM) +/* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */ +# define MSR_KERNEL_VMS (MSR_EE | MSR_VMS) +/* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */ + +/* Exception State Register (ESR) Fields */ +# define ESR_DIZ (1<<11) /* Zone Protection */ +# define ESR_S (1<<10) /* Store instruction */ + +# endif /* CONFIG_MMU */ +#endif /* _ASM_MICROBLAZE_REGISTERS_H */ diff --git a/arch/microblaze/include/asm/seccomp.h b/arch/microblaze/include/asm/seccomp.h new file mode 100644 index 000000000..95cdcabee --- /dev/null +++ b/arch/microblaze/include/asm/seccomp.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_MICROBLAZE_SECCOMP_H +#define _ASM_MICROBLAZE_SECCOMP_H + +#include <linux/unistd.h> + +#define __NR_seccomp_sigreturn __NR_sigreturn + +#include <asm-generic/seccomp.h> + +#endif /* _ASM_MICROBLAZE_SECCOMP_H */ diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h new file mode 100644 index 000000000..1b281d3ea --- /dev/null +++ b/arch/microblaze/include/asm/sections.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_SECTIONS_H +#define _ASM_MICROBLAZE_SECTIONS_H + +#include <asm-generic/sections.h> + +# ifndef __ASSEMBLY__ +extern char _ssbss[], _esbss[]; +extern unsigned long __ivt_start[], __ivt_end[]; + +extern u32 _fdt_start[], _fdt_end[]; + +# endif /* !__ASSEMBLY__ */ +#endif /* _ASM_MICROBLAZE_SECTIONS_H */ diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h new file mode 100644 index 000000000..ce9b7b786 --- /dev/null +++ b/arch/microblaze/include/asm/setup.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef _ASM_MICROBLAZE_SETUP_H +#define _ASM_MICROBLAZE_SETUP_H + +#include <uapi/asm/setup.h> + +# ifndef __ASSEMBLY__ +extern unsigned int boot_cpuid; /* move to smp.h */ + +extern char cmd_line[COMMAND_LINE_SIZE]; + +extern char *klimit; + +# ifdef CONFIG_MMU +extern void mmu_reset(void); +# endif /* CONFIG_MMU */ + +void time_init(void); +void init_IRQ(void); +void machine_early_init(const char *cmdline, unsigned int ram, + unsigned int fdt, unsigned int msr, unsigned int tlb0, + unsigned int tlb1); + +void machine_restart(char *cmd); +void machine_shutdown(void); +void machine_halt(void); +void machine_power_off(void); + +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + +# endif /* __ASSEMBLY__ */ +#endif /* _ASM_MICROBLAZE_SETUP_H */ diff --git a/arch/microblaze/include/asm/string.h b/arch/microblaze/include/asm/string.h new file mode 100644 index 000000000..aec2f5929 --- /dev/null +++ b/arch/microblaze/include/asm/string.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_STRING_H +#define _ASM_MICROBLAZE_STRING_H + +#ifdef __KERNEL__ + +#define __HAVE_ARCH_MEMSET +#define __HAVE_ARCH_MEMCPY +#define __HAVE_ARCH_MEMMOVE + +extern void *memset(void *, int, __kernel_size_t); +extern void *memcpy(void *, const void *, __kernel_size_t); +extern void *memmove(void *, const void *, __kernel_size_t); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_MICROBLAZE_STRING_H */ diff --git a/arch/microblaze/include/asm/switch_to.h b/arch/microblaze/include/asm/switch_to.h new file mode 100644 index 000000000..f45baa2c5 --- /dev/null +++ b/arch/microblaze/include/asm/switch_to.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_SWITCH_TO_H +#define _ASM_MICROBLAZE_SWITCH_TO_H + +struct task_struct; +struct thread_info; + +extern struct task_struct *_switch_to(struct thread_info *prev, + struct thread_info *next); + +#define switch_to(prev, next, last) \ + do { \ + (last) = _switch_to(task_thread_info(prev), \ + task_thread_info(next)); \ + } while (0) + +#endif /* _ASM_MICROBLAZE_SWITCH_TO_H */ diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h new file mode 100644 index 000000000..220decd60 --- /dev/null +++ b/arch/microblaze/include/asm/syscall.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MICROBLAZE_SYSCALL_H +#define __ASM_MICROBLAZE_SYSCALL_H + +#include <uapi/linux/audit.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <asm/ptrace.h> + +/* The system call number is given by the user in R12 */ +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->r12; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* TODO. */ +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return IS_ERR_VALUE(regs->r3) ? 
regs->r3 : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->r3; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) + regs->r3 = -error; + else + regs->r3 = val; +} + +static inline microblaze_reg_t microblaze_get_syscall_arg(struct pt_regs *regs, + unsigned int n) +{ + switch (n) { + case 5: return regs->r10; + case 4: return regs->r9; + case 3: return regs->r8; + case 2: return regs->r7; + case 1: return regs->r6; + case 0: return regs->r5; + default: + BUG(); + } + return ~0; +} + +static inline void microblaze_set_syscall_arg(struct pt_regs *regs, + unsigned int n, + unsigned long val) +{ + /* Each case must break: falling through would clobber every + * lower-numbered argument as well and then hit BUG() below. + */ + switch (n) { + case 5: + regs->r10 = val; + break; + case 4: + regs->r9 = val; + break; + case 3: + regs->r8 = val; + break; + case 2: + regs->r7 = val; + break; + case 1: + regs->r6 = val; + break; + case 0: + regs->r5 = val; + break; + default: + BUG(); + } +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + while (n--) + *args++ = microblaze_get_syscall_arg(regs, i++); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + while (n--) + microblaze_set_syscall_arg(regs, i++, *args++); +} + +asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs); +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); + +static inline int syscall_get_arch(void) +{ + return AUDIT_ARCH_MICROBLAZE; +} +#endif /* __ASM_MICROBLAZE_SYSCALL_H */ diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h new file mode 100644 index 000000000..9afe4b5bd --- /dev/null +++ b/arch/microblaze/include/asm/thread_info.h @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_THREAD_INFO_H +#define _ASM_MICROBLAZE_THREAD_INFO_H + +#ifdef __KERNEL__ + +/* we have 8k stack */ +#define THREAD_SHIFT 13 +#define THREAD_SIZE (1 << THREAD_SHIFT) +#define THREAD_SIZE_ORDER 1 + +#ifndef __ASSEMBLY__ +# include <linux/types.h> +# include <asm/processor.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ + +struct cpu_context { + __u32 r1; /* stack pointer */ + __u32 r2; + /* dedicated registers */ + __u32 r13; + __u32 r14; + __u32 r15; + __u32 r16; + __u32 r17; + __u32 r18; + /* non-volatile registers */ + __u32 r19; + __u32 r20; + __u32 r21; + __u32 r22; + __u32 r23; + __u32 r24; + __u32 r25; + __u32 r26; + __u32 r27; + __u32 r28; + __u32 r29; + __u32 r30; + /* r31 is used as current task pointer */ + /* special purpose registers */ + __u32 msr; + __u32 ear; + __u32 esr; + __u32 fsr; +}; + +typedef struct { + unsigned long seg; +} mm_segment_t; + +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* low level flags */ + unsigned long status; /* thread-synchronous flags */ + __u32 cpu; /* current CPU */ + __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ + mm_segment_t addr_limit; /* thread address space */ + + struct cpu_context cpu_context; +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +} + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + register unsigned long sp asm("r1"); + + return (struct thread_info *)(sp & ~(THREAD_SIZE-1)); +} + +/* thread information allocation */ +#endif /* __ASSEMBLY__ */ + +/* + * thread information flags + * - these are process state flags that various assembly files may + * need to access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +/* restore singlestep on return to user mode */ +#define TIF_SINGLESTEP 4 +#define TIF_MEMDIE 6 /* is terminating due to OOM killer */ +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ +#define TIF_SECCOMP 10 /* secure computing */ + +/* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_POLLING_NRFLAG 16 + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) + +/* work to do in syscall trace */ +#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ + _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) + +/* work to do on interrupt/exception return */ +#define _TIF_WORK_MASK 0x0000FFFE + +/* work to do on any return to u-space */ +#define _TIF_ALLWORK_MASK 0x0000FFFF + +/* + * Thread-synchronous status. 
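+ * (These TS_* values live in the ->status field of struct thread_info + * defined above.)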
+ * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +/* FPU was used by this task this quantum (SMP) */ +#define TS_USEDFPU 0x0001 + +#endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ diff --git a/arch/microblaze/include/asm/timex.h b/arch/microblaze/include/asm/timex.h new file mode 100644 index 000000000..befcf3de5 --- /dev/null +++ b/arch/microblaze/include/asm/timex.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_TIMEX_H +#define _ASM_MICROBLAZE_TIMEX_H + +#include <asm-generic/timex.h> + +#define CLOCK_TICK_RATE 1000 /* Timer input freq. */ + +#endif /* _ASM_TIMEX_H */ diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h new file mode 100644 index 000000000..99b6ded54 --- /dev/null +++ b/arch/microblaze/include/asm/tlb.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_TLB_H +#define _ASM_MICROBLAZE_TLB_H + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include <linux/pagemap.h> + +#ifdef CONFIG_MMU +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) +#endif + +#include <asm-generic/tlb.h> + +#endif /* _ASM_MICROBLAZE_TLB_H */ diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h new file mode 100644 index 000000000..2e1353c2d --- /dev/null +++ b/arch/microblaze/include/asm/tlbflush.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_TLBFLUSH_H +#define _ASM_MICROBLAZE_TLBFLUSH_H + +#ifdef CONFIG_MMU + +#include <linux/sched.h> +#include <linux/threads.h> +#include <asm/processor.h> /* For TASK_SIZE */ +#include <asm/mmu.h> +#include <asm/page.h> +#include <asm/pgalloc.h> + +extern void _tlbie(unsigned long address); +extern void _tlbia(void); + +#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); } +#define __tlbie(x) { _tlbie(x); } + +static inline void local_flush_tlb_all(void) + { __tlbia(); } +static inline void local_flush_tlb_mm(struct mm_struct *mm) + { __tlbia(); } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) + { __tlbie(vmaddr); } +static inline void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) + { __tlbia(); } + +#define flush_tlb_kernel_range(start, end) do { } while (0) + +#define update_mmu_cache(vma, addr, ptep) do { } while (0) + +#define flush_tlb_all local_flush_tlb_all +#define flush_tlb_mm local_flush_tlb_mm +#define flush_tlb_page local_flush_tlb_page +#define flush_tlb_range local_flush_tlb_range + +/* + * This is called in munmap when we have freed up some page-table + * pages. We don't need to do anything here, there's nothing special + * about our page-table pages. -- paulus + */ +static inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) { } + +#else /* CONFIG_MMU */ + +#define flush_tlb() BUG() +#define flush_tlb_all() BUG() +#define flush_tlb_mm(mm) BUG() +#define flush_tlb_page(vma, addr) BUG() +#define flush_tlb_range(mm, start, end) BUG() +#define flush_tlb_pgtables(mm, start, end) BUG() +#define flush_tlb_kernel_range(start, end) BUG() + +#endif /* CONFIG_MMU */ + +#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h new file mode 100644 index 000000000..81f16aadb --- /dev/null +++ b/arch/microblaze/include/asm/uaccess.h @@ -0,0 +1,380 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_UACCESS_H +#define _ASM_MICROBLAZE_UACCESS_H + +#include <linux/kernel.h> +#include <linux/mm.h> + +#include <asm/mmu.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/extable.h> +#include <linux/string.h> + +/* + * On Microblaze the fs value is actually the top of the corresponding + * address space. + * + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed; with + * get_fs() == KERNEL_DS, it is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + * + * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal. 
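+ * + * A sketch of the classic pattern this enables (illustrative only; + * "read_helper" stands for any routine taking a __user pointer, and + * kbuf/len are hypothetical kernel-side locals; this set_fs() idiom + * was removed from later kernels entirely): + * + *	mm_segment_t old_fs = get_fs(); + *	long ret; + * + *	set_fs(KERNEL_DS); + *	ret = read_helper((void __user *)kbuf, len); + *	set_fs(old_fs);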
+ */ +# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +# ifndef CONFIG_MMU +# define KERNEL_DS MAKE_MM_SEG(0) +# define USER_DS KERNEL_DS +# else +# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) +# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) +# endif + +# define get_ds() (KERNEL_DS) +# define get_fs() (current_thread_info()->addr_limit) +# define set_fs(val) (current_thread_info()->addr_limit = (val)) + +# define segment_eq(a, b) ((a).seg == (b).seg) + +#ifndef CONFIG_MMU + +/* Check against bounds of physical memory */ +static inline int ___range_ok(unsigned long addr, unsigned long size) +{ + return ((addr < memory_start) || + ((addr + size - 1) > (memory_start + memory_size - 1))); +} + +#define __range_ok(addr, size) \ + ___range_ok((unsigned long)(addr), (unsigned long)(size)) + +#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) + +#else + +static inline int access_ok(int type, const void __user *addr, + unsigned long size) +{ + if (!size) + goto ok; + + if ((get_fs().seg < ((unsigned long)addr)) || + (get_fs().seg < ((unsigned long)addr + size - 1))) { + pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + (u32)get_fs().seg); + return 0; + } +ok: + pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + (u32)get_fs().seg); + return 1; +} +#endif + +#ifdef CONFIG_MMU +# define __FIXUP_SECTION ".section .fixup,\"ax\"\n" +# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" +#else +# define __FIXUP_SECTION ".section .discard,\"ax\"\n" +# define __EX_TABLE_SECTION ".section .discard,\"ax\"\n" +#endif + +extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */ +static inline unsigned long __must_check __clear_user(void __user *to, + unsigned long n) +{ + /* normal memset with two words to __ex_table */ + __asm__ __volatile__ ( \ + "1: sb r0, %1, r0;" \ + " addik %0, %0, -1;" \ + " bneid %0, 1b;" \ + " addik %1, %1, 1;" \ + "2: " \ + __EX_TABLE_SECTION \ + ".word 1b,2b;" \ + ".previous;" \ + : "=r"(n), "=r"(to) \ + : "0"(n), "1"(to) + ); + return n; +} + +static inline unsigned long __must_check clear_user(void __user *to, + unsigned long n) +{ + might_fault(); + if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + return n; + + return __clear_user(to, n); +} + +/* put_user and get_user macros */ +extern long __user_bad(void); + +#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ +({ \ + __asm__ __volatile__ ( \ + "1:" insn " %1, %2, r0;" \ + " addk %0, r0, r0;" \ + "2: " \ + __FIXUP_SECTION \ + "3: brid 2b;" \ + " addik %0, r0, %3;" \ + ".previous;" \ + __EX_TABLE_SECTION \ + ".word 1b,3b;" \ + ".previous;" \ + : "=&r"(__gu_err), "=r"(__gu_val) \ + : "r"(__gu_ptr), "i"(-EFAULT) \ + ); \ +}) + +/** + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. 
+ * On error, the variable @x is set to zero. + */ +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +#define __get_user_check(x, ptr, size) \ +({ \ + unsigned long __gu_val = 0; \ + const typeof(*(ptr)) __user *__gu_addr = (ptr); \ + int __gu_err = 0; \ + \ + if (access_ok(VERIFY_READ, __gu_addr, size)) { \ + switch (size) { \ + case 1: \ + __get_user_asm("lbu", __gu_addr, __gu_val, \ + __gu_err); \ + break; \ + case 2: \ + __get_user_asm("lhu", __gu_addr, __gu_val, \ + __gu_err); \ + break; \ + case 4: \ + __get_user_asm("lw", __gu_addr, __gu_val, \ + __gu_err); \ + break; \ + default: \ + __gu_err = __user_bad(); \ + break; \ + } \ + } else { \ + __gu_err = -EFAULT; \ + } \ + x = (__force typeof(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user(x, ptr) \ +({ \ + unsigned long __gu_val = 0; \ + /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ + long __gu_err; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \ + break; \ + case 2: \ + __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \ + break; \ + case 4: \ + __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ + break; \ + default: \ + /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ + } \ + x = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + + +#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ +({ \ + __asm__ __volatile__ ( \ + "1:" insn " %1, %2, r0;" \ + " addk %0, r0, r0;" \ + "2: " \ + __FIXUP_SECTION \ + "3: brid 2b;" \ + " addik %0, r0, %3;" \ + ".previous;" \ + __EX_TABLE_SECTION \ + ".word 1b,3b;" \ + ".previous;" \ + : "=&r"(__gu_err) \ + : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ + ); \ +}) + +#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ +({ \ + __asm__ __volatile__ (" lwi %0, %1, 0;" \ + "1: swi %0, %2, 0;" \ + " lwi %0, %1, 4;" \ + "2: swi %0, %2, 4;" \ + " addk %0, r0, r0;" \ + "3: " \ + __FIXUP_SECTION \ + "4: brid 3b;" \ + " addik %0, r0, %3;" \ + ".previous;" \ + __EX_TABLE_SECTION \ + ".word 1b,4b,2b,4b;" \ + ".previous;" \ + : "=&r"(__gu_err) \ + : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ + ); \ +}) + +/** + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. 
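+ * + * A minimal usage sketch (illustrative only; "uptr" is a hypothetical + * pointer supplied from user space): + * + *	static int give_answer(int __user *uptr) + *	{ + *		return put_user(42, uptr); + *	}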
+ */ +#define put_user(x, ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) + +#define __put_user_check(x, ptr, size) \ +({ \ + typeof(*(ptr)) volatile __pu_val = x; \ + typeof(*(ptr)) __user *__pu_addr = (ptr); \ + int __pu_err = 0; \ + \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \ + switch (size) { \ + case 1: \ + __put_user_asm("sb", __pu_addr, __pu_val, \ + __pu_err); \ + break; \ + case 2: \ + __put_user_asm("sh", __pu_addr, __pu_val, \ + __pu_err); \ + break; \ + case 4: \ + __put_user_asm("sw", __pu_addr, __pu_val, \ + __pu_err); \ + break; \ + case 8: \ + __put_user_asm_8(__pu_addr, __pu_val, __pu_err);\ + break; \ + default: \ + __pu_err = __user_bad(); \ + break; \ + } \ + } else { \ + __pu_err = -EFAULT; \ + } \ + __pu_err; \ +}) + +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) volatile __gu_val = (x); \ + long __gu_err = 0; \ + switch (sizeof(__gu_val)) { \ + case 1: \ + __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ + break; \ + case 2: \ + __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ + break; \ + case 4: \ + __put_user_asm("sw", (ptr), __gu_val, __gu_err); \ + break; \ + case 8: \ + __put_user_asm_8((ptr), __gu_val, __gu_err); \ + break; \ + default: \ + /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \ + } \ + __gu_err; \ +}) + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __copy_tofrom_user((__force void __user *)to, from, n); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __copy_tofrom_user(to, (__force const void __user *)from, n); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +/* + * Copy a null terminated string from userspace. + */ +extern int __strncpy_user(char *to, const char __user *from, int len); + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return -EFAULT; + return __strncpy_user(dst, src, count); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +extern int __strnlen_user(const char __user *sstr, int len); + +static inline long strnlen_user(const char __user *src, long n) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return 0; + return __strnlen_user(src, n); +} + +#endif /* _ASM_MICROBLAZE_UACCESS_H */ diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h new file mode 100644 index 000000000..b162ed880 --- /dev/null +++ b/arch/microblaze/include/asm/unaligned.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2008 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_UNALIGNED_H +#define _ASM_MICROBLAZE_UNALIGNED_H + +# ifdef __KERNEL__ + +# ifdef __MICROBLAZEEL__ +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +# else +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +# endif + +# include <linux/unaligned/generic.h> + +# endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_UNALIGNED_H */ diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h new file mode 100644 index 000000000..a62d09420 --- /dev/null +++ b/arch/microblaze/include/asm/unistd.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_MICROBLAZE_UNISTD_H +#define _ASM_MICROBLAZE_UNISTD_H + +#include <uapi/asm/unistd.h> + +#ifndef __ASSEMBLY__ + +/* #define __ARCH_WANT_OLD_READDIR */ +/* #define __ARCH_WANT_OLD_STAT */ +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +/* #define __ARCH_WANT_SYS_OLD_GETRLIMIT */ +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_FORK + +#endif /* __ASSEMBLY__ */ + +#define __NR_syscalls 401 + +#endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/asm/unwind.h b/arch/microblaze/include/asm/unwind.h new file mode 100644 index 000000000..d248b7de4 --- /dev/null +++ b/arch/microblaze/include/asm/unwind.h @@ -0,0 +1,29 @@ +/* + * Backtrace support for Microblaze + * + * Copyright (C) 2010 Digital Design Corporation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef __MICROBLAZE_UNWIND_H +#define __MICROBLAZE_UNWIND_H + +struct stack_trace; + +struct trap_handler_info { + unsigned long start_addr; + unsigned long end_addr; + const char *trap_name; +}; +extern struct trap_handler_info microblaze_trap_handlers; + +extern const char _hw_exception_handler; +extern const char ex_handler_unhandled; + +void microblaze_unwind(struct task_struct *task, struct stack_trace *trace); + +#endif /* __MICROBLAZE_UNWIND_H */ + diff --git a/arch/microblaze/include/asm/user.h b/arch/microblaze/include/asm/user.h new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/arch/microblaze/include/asm/user.h @@ -0,0 +1 @@ + diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild new file mode 100644 index 000000000..2c6a6bffe --- /dev/null +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -0,0 +1,30 @@ +# UAPI Header export list +include include/uapi/asm-generic/Kbuild.asm + +generic-y += bitsperlong.h +generic-y += bpf_perf_event.h +generic-y += errno.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipcbuf.h +generic-y += kvm_para.h +generic-y += mman.h +generic-y += msgbuf.h +generic-y += param.h +generic-y += poll.h +generic-y += resource.h +generic-y += sembuf.h +generic-y += shmbuf.h +generic-y += shmparam.h +generic-y += siginfo.h +generic-y += signal.h +generic-y += socket.h +generic-y += sockios.h +generic-y += stat.h +generic-y += statfs.h +generic-y += swab.h +generic-y += termbits.h +generic-y += termios.h +generic-y += types.h +generic-y += ucontext.h diff --git a/arch/microblaze/include/uapi/asm/auxvec.h b/arch/microblaze/include/uapi/asm/auxvec.h new file mode 100644 index 000000000..93dd07bd0 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/auxvec.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + diff --git a/arch/microblaze/include/uapi/asm/byteorder.h b/arch/microblaze/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..763660169 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/byteorder.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_MICROBLAZE_BYTEORDER_H +#define _ASM_MICROBLAZE_BYTEORDER_H + +#ifdef __MICROBLAZEEL__ +#include <linux/byteorder/little_endian.h> +#else +#include <linux/byteorder/big_endian.h> +#endif + +#endif /* _ASM_MICROBLAZE_BYTEORDER_H */ diff --git a/arch/microblaze/include/uapi/asm/elf.h b/arch/microblaze/include/uapi/asm/elf.h new file mode 100644 index 000000000..6b656de29 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/elf.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_MICROBLAZE_ELF_H +#define _UAPI_ASM_MICROBLAZE_ELF_H + +#include <linux/elf-em.h> + +/* + * Note there is no "official" ELF designation for Microblaze. + * I've snaffled the value from the microblaze binutils source code + * /binutils/microblaze/include/elf/microblaze.h + */ +#define EM_MICROBLAZE_OLD 0xbaab +#define ELF_ARCH EM_MICROBLAZE + +/* + * This is used to ensure we don't load something for the wrong architecture. 
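+ * Both the EM_MICROBLAZE value from <linux/elf-em.h> and the + * historical EM_MICROBLAZE_OLD (0xbaab) value are accepted.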
+ */ +#define elf_check_arch(x) ((x)->e_machine == EM_MICROBLAZE \ + || (x)->e_machine == EM_MICROBLAZE_OLD) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +#ifndef __uClinux__ + +/* + * ELF register definitions.. + */ + +#include <asm/ptrace.h> +#include <asm/byteorder.h> + +#ifndef ELF_GREG_T +#define ELF_GREG_T +typedef unsigned long elf_greg_t; +#endif + +#ifndef ELF_NGREG +#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) +#endif + +#ifndef ELF_GREGSET_T +#define ELF_GREGSET_T +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +#endif + +#ifndef ELF_FPREGSET_T +#define ELF_FPREGSET_T + +/* TBD */ +#define ELF_NFPREG 33 /* includes fsr */ +typedef unsigned long elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* typedef struct user_fpu_struct elf_fpregset_t; */ +#endif + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (0x08000000) + +#ifdef __MICROBLAZEEL__ +#define ELF_DATA ELFDATA2LSB +#else +#define ELF_DATA ELFDATA2MSB +#endif + +#define ELF_EXEC_PAGESIZE PAGE_SIZE + + +#define ELF_CORE_COPY_REGS(_dest, _regs) \ + memcpy((char *) &_dest, (char *) _regs, \ + sizeof(struct pt_regs)); + +/* This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. This could be done in user space, + * but it's not easy, and we've already done it here. + */ +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + + * For the moment, we have only optimizations for the Intel generations, + * but that could change... + */ +#define ELF_PLATFORM (NULL) + +/* Added _f parameter. Is this definition correct: TBD */ +#define ELF_PLAT_INIT(_r, _f) \ +do { \ + _r->r0 = _r->r1 = _r->r2 = _r->r3 = \ + _r->r4 = _r->r5 = _r->r6 = _r->r7 = \ + _r->r8 = _r->r9 = _r->r10 = _r->r11 = \ + _r->r12 = _r->r13 = _r->r14 = _r->r15 = \ + _r->r16 = _r->r17 = _r->r18 = _r->r19 = \ + _r->r20 = _r->r21 = _r->r22 = _r->r23 = \ + _r->r24 = _r->r25 = _r->r26 = _r->r27 = \ + _r->r28 = _r->r29 = _r->r30 = _r->r31 = \ + 0; \ +} while (0) + + +#endif /* __uClinux__ */ + +#endif /* _UAPI_ASM_MICROBLAZE_ELF_H */ diff --git a/arch/microblaze/include/uapi/asm/posix_types.h b/arch/microblaze/include/uapi/asm/posix_types.h new file mode 100644 index 000000000..f3249da69 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/posix_types.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_MICROBLAZE_POSIX_TYPES_H +#define _ASM_MICROBLAZE_POSIX_TYPES_H + +typedef unsigned short __kernel_mode_t; +#define __kernel_mode_t __kernel_mode_t + +#include <asm-generic/posix_types.h> + +#endif /* _ASM_MICROBLAZE_POSIX_TYPES_H */ diff --git a/arch/microblaze/include/uapi/asm/ptrace.h b/arch/microblaze/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..46dd94cb7 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/ptrace.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_MICROBLAZE_PTRACE_H +#define _UAPI_ASM_MICROBLAZE_PTRACE_H + +#ifndef __ASSEMBLY__ + +typedef unsigned long microblaze_reg_t; + +struct pt_regs { + microblaze_reg_t r0; + microblaze_reg_t r1; + microblaze_reg_t r2; + microblaze_reg_t r3; + microblaze_reg_t r4; + microblaze_reg_t r5; + microblaze_reg_t r6; + microblaze_reg_t r7; + microblaze_reg_t r8; + microblaze_reg_t r9; + microblaze_reg_t r10; + microblaze_reg_t r11; + microblaze_reg_t r12; + microblaze_reg_t r13; + microblaze_reg_t r14; + microblaze_reg_t r15; + microblaze_reg_t r16; + microblaze_reg_t r17; + microblaze_reg_t r18; + microblaze_reg_t r19; + microblaze_reg_t r20; + microblaze_reg_t r21; + microblaze_reg_t r22; + microblaze_reg_t r23; + microblaze_reg_t r24; + microblaze_reg_t r25; + microblaze_reg_t r26; + microblaze_reg_t r27; + microblaze_reg_t r28; + microblaze_reg_t r29; + microblaze_reg_t r30; + microblaze_reg_t r31; + microblaze_reg_t pc; + microblaze_reg_t msr; + microblaze_reg_t ear; + microblaze_reg_t esr; + microblaze_reg_t fsr; + int pt_mode; +}; + +#ifndef __KERNEL__ + +/* pt_regs offsets used by gdbserver etc in ptrace syscalls */ +#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) +#define PT_PC (32 * sizeof(microblaze_reg_t)) +#define PT_MSR (33 * sizeof(microblaze_reg_t)) +#define PT_EAR (34 * sizeof(microblaze_reg_t)) +#define PT_ESR (35 * sizeof(microblaze_reg_t)) +#define PT_FSR (36 * sizeof(microblaze_reg_t)) +#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t)) + +#endif /* __KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_ASM_MICROBLAZE_PTRACE_H */ diff --git a/arch/microblaze/include/uapi/asm/setup.h b/arch/microblaze/include/uapi/asm/setup.h new file mode 100644 index 000000000..6831794e6 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/setup.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_MICROBLAZE_SETUP_H +#define _UAPI_ASM_MICROBLAZE_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +# ifndef __ASSEMBLY__ + +# endif /* __ASSEMBLY__ */ +#endif /* _UAPI_ASM_MICROBLAZE_SETUP_H */ diff --git a/arch/microblaze/include/uapi/asm/sigcontext.h b/arch/microblaze/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..47eb2e211 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/sigcontext.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_SIGCONTEXT_H +#define _ASM_MICROBLAZE_SIGCONTEXT_H + +/* FIXME should be linux/ptrace.h */ +#include <asm/ptrace.h> + +struct sigcontext { + struct pt_regs regs; + unsigned long oldmask; +}; + +#endif /* _ASM_MICROBLAZE_SIGCONTEXT_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h new file mode 100644 index 000000000..7a9f16a76 --- /dev/null +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_MICROBLAZE_UNISTD_H +#define _UAPI_ASM_MICROBLAZE_UNISTD_H + +#define __NR_restart_syscall 0 /* ok */ +#define __NR_exit 1 /* ok */ +#define __NR_fork 2 /* not for no MMU - weird */ +#define __NR_read 3 /* ok */ +#define __NR_write 4 /* ok */ +#define __NR_open 5 /* openat */ +#define __NR_close 6 /* ok */ +#define __NR_waitpid 7 /* waitid */ +#define __NR_creat 8 /* openat */ +#define __NR_link 9 /* linkat */ +#define __NR_unlink 10 /* unlinkat */ +#define __NR_execve 11 /* ok */ +#define __NR_chdir 12 /* ok */ +#define __NR_time 13 /* obsolete -> sys_gettimeofday */ +#define __NR_mknod 14 /* mknodat */ +#define __NR_chmod 15 /* fchmodat */ +#define __NR_lchown 16 /* ok */ +#define __NR_break 17 /* don't know */ +#define __NR_oldstat 18 /* remove */ +#define __NR_lseek 19 /* ok */ +#define __NR_getpid 20 /* ok */ +#define __NR_mount 21 /* ok */ +#define __NR_umount 22 /* ok */ /* use only umount2 */ +#define __NR_setuid 23 /* ok */ +#define __NR_getuid 24 /* ok */ +#define __NR_stime 25 /* obsolete -> sys_settimeofday */ +#define __NR_ptrace 26 /* ok */ +#define __NR_alarm 27 /* obsolete -> sys_setitimer */ +#define __NR_oldfstat 28 /* remove */ +#define __NR_pause 29 /* obsolete -> sys_rt_sigtimedwait */ +#define __NR_utime 30 /* obsolete -> sys_utimesat */ +#define __NR_stty 31 /* remove */ +#define __NR_gtty 32 /* remove */ +#define __NR_access 33 /* faccessat */ +/* can be implemented by sys_setpriority */ +#define __NR_nice 34 +#define __NR_ftime 35 /* remove */ +#define __NR_sync 36 /* ok */ +#define __NR_kill 37 /* ok */ +#define __NR_rename 38 /* renameat */ +#define __NR_mkdir 39 /* mkdirat */ +#define __NR_rmdir 40 /* unlinkat */ +#define __NR_dup 41 /* ok */ +#define __NR_pipe 42 /* ok */ +#define __NR_times 43 /* ok */ +#define __NR_prof 44 /* remove */ +#define __NR_brk 45 /* ok -mmu, nommu specific */ +#define __NR_setgid 46 /* ok */ +#define __NR_getgid 47 /* ok */ +#define __NR_signal 48 /* obsolete -> sys_rt_sigaction */ +#define __NR_geteuid 49 /* ok */ +#define __NR_getegid 50 /* ok */ +#define __NR_acct 51 /* add it and then I can disable it */ +#define __NR_umount2 52 /* remove */ +#define __NR_lock 53 /* remove */ +#define __NR_ioctl 54 /* ok */ +#define __NR_fcntl 55 /* ok -> 64bit version*/ +#define __NR_mpx 56 /* remove */ +#define __NR_setpgid 57 /* ok */ +#define __NR_ulimit 58 /* remove */ +#define __NR_oldolduname 59 /* remove */ +#define __NR_umask 60 /* ok */ +#define __NR_chroot 61 /* ok */ +#define __NR_ustat 62 /* obsolete -> statfs64 */ +#define __NR_dup2 63 /* ok */ +#define __NR_getppid 64 /* ok */ +#define __NR_getpgrp 65 /* obsolete -> sys_getpgid */ +#define __NR_setsid 66 /* ok */ +#define 
__NR_sigaction 67 /* obsolete -> rt_sigaction */ +#define __NR_sgetmask 68 /* obsolete -> sys_rt_sigprocmask */ +#define __NR_ssetmask 69 /* obsolete ->sys_rt_sigprocmask */ +#define __NR_setreuid 70 /* ok */ +#define __NR_setregid 71 /* ok */ +#define __NR_sigsuspend 72 /* obsolete -> rt_sigsuspend */ +#define __NR_sigpending 73 /* obsolete -> sys_rt_sigpending */ +#define __NR_sethostname 74 /* ok */ +#define __NR_setrlimit 75 /* ok */ +#define __NR_getrlimit 76 /* ok Back compatible 2G limited rlimit */ +#define __NR_getrusage 77 /* ok */ +#define __NR_gettimeofday 78 /* ok */ +#define __NR_settimeofday 79 /* ok */ +#define __NR_getgroups 80 /* ok */ +#define __NR_setgroups 81 /* ok */ +#define __NR_select 82 /* obsolete -> sys_pselect6 */ +#define __NR_symlink 83 /* symlinkat */ +#define __NR_oldlstat 84 /* remove */ +#define __NR_readlink 85 /* obsolete -> sys_readlinkat */ +#define __NR_uselib 86 /* remove */ +#define __NR_swapon 87 /* ok */ +#define __NR_reboot 88 /* ok */ +#define __NR_readdir 89 /* remove ? */ +#define __NR_mmap 90 /* obsolete -> sys_mmap2 */ +#define __NR_munmap 91 /* ok - mmu and nommu */ +#define __NR_truncate 92 /* ok or truncate64 */ +#define __NR_ftruncate 93 /* ok or ftruncate64 */ +#define __NR_fchmod 94 /* ok */ +#define __NR_fchown 95 /* ok */ +#define __NR_getpriority 96 /* ok */ +#define __NR_setpriority 97 /* ok */ +#define __NR_profil 98 /* remove */ +#define __NR_statfs 99 /* ok or statfs64 */ +#define __NR_fstatfs 100 /* ok or fstatfs64 */ +#define __NR_ioperm 101 /* remove */ +#define __NR_socketcall 102 /* remove */ +#define __NR_syslog 103 /* ok */ +#define __NR_setitimer 104 /* ok */ +#define __NR_getitimer 105 /* ok */ +#define __NR_stat 106 /* remove */ +#define __NR_lstat 107 /* remove */ +#define __NR_fstat 108 /* remove */ +#define __NR_olduname 109 /* remove */ +#define __NR_iopl 110 /* remove */ +#define __NR_vhangup 111 /* ok */ +#define __NR_idle 112 /* remove */ +#define __NR_vm86old 113 /* remove */ +#define __NR_wait4 114 /* obsolete -> waitid */ +#define __NR_swapoff 115 /* ok */ +#define __NR_sysinfo 116 /* ok */ +#define __NR_ipc 117 /* remove - direct call */ +#define __NR_fsync 118 /* ok */ +#define __NR_sigreturn 119 /* obsolete -> sys_rt_sigreturn */ +#define __NR_clone 120 /* ok */ +#define __NR_setdomainname 121 /* ok */ +#define __NR_uname 122 /* remove */ +#define __NR_modify_ldt 123 /* remove */ +#define __NR_adjtimex 124 /* ok */ +#define __NR_mprotect 125 /* remove */ +#define __NR_sigprocmask 126 /* obsolete -> sys_rt_sigprocmask */ +#define __NR_create_module 127 /* remove */ +#define __NR_init_module 128 /* ok */ +#define __NR_delete_module 129 /* ok */ +#define __NR_get_kernel_syms 130 /* remove */ +#define __NR_quotactl 131 /* ok */ +#define __NR_getpgid 132 /* ok */ +#define __NR_fchdir 133 /* ok */ +#define __NR_bdflush 134 /* remove */ +#define __NR_sysfs 135 /* needed for busybox */ +#define __NR_personality 136 /* ok */ +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 /* ok */ +#define __NR_setfsgid 139 /* ok */ +#define __NR__llseek 140 /* remove only lseek */ +#define __NR_getdents 141 /* ok or getdents64 */ +#define __NR__newselect 142 /* remove */ +#define __NR_flock 143 /* ok */ +#define __NR_msync 144 /* remove */ +#define __NR_readv 145 /* ok */ +#define __NR_writev 146 /* ok */ +#define __NR_getsid 147 /* ok */ +#define __NR_fdatasync 148 /* ok */ +#define __NR__sysctl 149 /* remove */ +#define __NR_mlock 150 /* ok - nommu or mmu */ +#define __NR_munlock 
151 /* ok - nommu or mmu */ +#define __NR_mlockall 152 /* ok - nommu or mmu */ +#define __NR_munlockall 153 /* ok - nommu or mmu */ +#define __NR_sched_setparam 154 /* ok */ +#define __NR_sched_getparam 155 /* ok */ +#define __NR_sched_setscheduler 156 /* ok */ +#define __NR_sched_getscheduler 157 /* ok */ +#define __NR_sched_yield 158 /* ok */ +#define __NR_sched_get_priority_max 159 /* ok */ +#define __NR_sched_get_priority_min 160 /* ok */ +#define __NR_sched_rr_get_interval 161 /* ok */ +#define __NR_nanosleep 162 /* ok */ +#define __NR_mremap 163 /* ok - nommu or mmu */ +#define __NR_setresuid 164 /* ok */ +#define __NR_getresuid 165 /* ok */ +#define __NR_vm86 166 /* remove */ +#define __NR_query_module 167 /* ok */ +#define __NR_poll 168 /* obsolete -> sys_ppoll */ +#define __NR_nfsservctl 169 /* ok */ +#define __NR_setresgid 170 /* ok */ +#define __NR_getresgid 171 /* ok */ +#define __NR_prctl 172 /* ok */ +#define __NR_rt_sigreturn 173 /* ok */ +#define __NR_rt_sigaction 174 /* ok */ +#define __NR_rt_sigprocmask 175 /* ok */ +#define __NR_rt_sigpending 176 /* ok */ +#define __NR_rt_sigtimedwait 177 /* ok */ +#define __NR_rt_sigqueueinfo 178 /* ok */ +#define __NR_rt_sigsuspend 179 /* ok */ +#define __NR_pread64 180 /* ok */ +#define __NR_pwrite64 181 /* ok */ +#define __NR_chown 182 /* obsolete -> fchownat */ +#define __NR_getcwd 183 /* ok */ +#define __NR_capget 184 /* ok */ +#define __NR_capset 185 /* ok */ +#define __NR_sigaltstack 186 /* remove */ +#define __NR_sendfile 187 /* ok -> exist 64bit version*/ +#define __NR_getpmsg 188 /* remove */ +/* remove - some people actually want streams */ +#define __NR_putpmsg 189 +/* for noMMU - group with clone -> maybe remove */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 /* remove - SuS compliant getrlimit */ +#define __NR_mmap2 192 /* ok */ +#define __NR_truncate64 193 /* ok */ +#define __NR_ftruncate64 194 /* ok */ +#define __NR_stat64 195 /* remove _ARCH_WANT_STAT64 */ +#define __NR_lstat64 196 /* remove _ARCH_WANT_STAT64 */ +#define __NR_fstat64 197 /* remove _ARCH_WANT_STAT64 */ +#define __NR_lchown32 198 /* ok - without 32 */ +#define __NR_getuid32 199 /* ok - without 32 */ +#define __NR_getgid32 200 /* ok - without 32 */ +#define __NR_geteuid32 201 /* ok - without 32 */ +#define __NR_getegid32 202 /* ok - without 32 */ +#define __NR_setreuid32 203 /* ok - without 32 */ +#define __NR_setregid32 204 /* ok - without 32 */ +#define __NR_getgroups32 205 /* ok - without 32 */ +#define __NR_setgroups32 206 /* ok - without 32 */ +#define __NR_fchown32 207 /* ok - without 32 */ +#define __NR_setresuid32 208 /* ok - without 32 */ +#define __NR_getresuid32 209 /* ok - without 32 */ +#define __NR_setresgid32 210 /* ok - without 32 */ +#define __NR_getresgid32 211 /* ok - without 32 */ +#define __NR_chown32 212 /* ok - without 32 -obsolete -> fchownat */ +#define __NR_setuid32 213 /* ok - without 32 */ +#define __NR_setgid32 214 /* ok - without 32 */ +#define __NR_setfsuid32 215 /* ok - without 32 */ +#define __NR_setfsgid32 216 /* ok - without 32 */ +#define __NR_pivot_root 217 /* ok */ +#define __NR_mincore 218 /* ok */ +#define __NR_madvise 219 /* ok */ +#define __NR_getdents64 220 /* ok */ +#define __NR_fcntl64 221 /* ok */ +/* 223 is unused */ +#define __NR_gettid 224 /* ok */ +#define __NR_readahead 225 /* ok */ +#define __NR_setxattr 226 /* ok */ +#define __NR_lsetxattr 227 /* ok */ +#define __NR_fsetxattr 228 /* ok */ +#define __NR_getxattr 229 /* ok */ +#define __NR_lgetxattr 230 /* ok */ +#define __NR_fgetxattr 231 /* ok */ 
+#define __NR_listxattr 232 /* ok */ +#define __NR_llistxattr 233 /* ok */ +#define __NR_flistxattr 234 /* ok */ +#define __NR_removexattr 235 /* ok */ +#define __NR_lremovexattr 236 /* ok */ +#define __NR_fremovexattr 237 /* ok */ +#define __NR_tkill 238 /* ok */ +#define __NR_sendfile64 239 /* ok */ +#define __NR_futex 240 /* ok */ +#define __NR_sched_setaffinity 241 /* ok */ +#define __NR_sched_getaffinity 242 /* ok */ +#define __NR_set_thread_area 243 /* remove */ +#define __NR_get_thread_area 244 /* remove */ +#define __NR_io_setup 245 /* ok */ +#define __NR_io_destroy 246 /* ok */ +#define __NR_io_getevents 247 /* ok */ +#define __NR_io_submit 248 /* ok */ +#define __NR_io_cancel 249 /* ok */ +#define __NR_fadvise64 250 /* remove -> sys_fadvise64_64 */ +/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ +#define __NR_exit_group 252 /* ok */ +#define __NR_lookup_dcookie 253 /* ok */ +#define __NR_epoll_create 254 /* ok */ +#define __NR_epoll_ctl 255 /* ok */ +#define __NR_epoll_wait 256 /* obsolete -> sys_epoll_pwait */ +#define __NR_remap_file_pages 257 /* only for mmu */ +#define __NR_set_tid_address 258 /* ok */ +#define __NR_timer_create 259 /* ok */ +#define __NR_timer_settime (__NR_timer_create+1) /* 260 */ /* ok */ +#define __NR_timer_gettime (__NR_timer_create+2) /* 261 */ /* ok */ +#define __NR_timer_getoverrun (__NR_timer_create+3) /* 262 */ /* ok */ +#define __NR_timer_delete (__NR_timer_create+4) /* 263 */ /* ok */ +#define __NR_clock_settime (__NR_timer_create+5) /* 264 */ /* ok */ +#define __NR_clock_gettime (__NR_timer_create+6) /* 265 */ /* ok */ +#define __NR_clock_getres (__NR_timer_create+7) /* 266 */ /* ok */ +#define __NR_clock_nanosleep (__NR_timer_create+8) /* 267 */ /* ok */ +#define __NR_statfs64 268 /* ok */ +#define __NR_fstatfs64 269 /* ok */ +#define __NR_tgkill 270 /* ok */ +#define __NR_utimes 271 /* obsolete -> sys_futimesat */ +#define __NR_fadvise64_64 272 /* ok */ +#define __NR_vserver 273 /* ok */ +#define __NR_mbind 274 /* only for mmu */ +#define __NR_get_mempolicy 275 /* only for mmu */ +#define __NR_set_mempolicy 276 /* only for mmu */ +#define __NR_mq_open 277 /* ok */ +#define __NR_mq_unlink (__NR_mq_open+1) /* 278 */ /* ok */ +#define __NR_mq_timedsend (__NR_mq_open+2) /* 279 */ /* ok */ +#define __NR_mq_timedreceive (__NR_mq_open+3) /* 280 */ /* ok */ +#define __NR_mq_notify (__NR_mq_open+4) /* 281 */ /* ok */ +#define __NR_mq_getsetattr (__NR_mq_open+5) /* 282 */ /* ok */ +#define __NR_kexec_load 283 /* ok */ +#define __NR_waitid 284 /* ok */ +/* #define __NR_sys_setaltroot 285 */ +#define __NR_add_key 286 /* ok */ +#define __NR_request_key 287 /* ok */ +#define __NR_keyctl 288 /* ok */ +#define __NR_ioprio_set 289 /* ok */ +#define __NR_ioprio_get 290 /* ok */ +#define __NR_inotify_init 291 /* ok */ +#define __NR_inotify_add_watch 292 /* ok */ +#define __NR_inotify_rm_watch 293 /* ok */ +#define __NR_migrate_pages 294 /* mmu */ +#define __NR_openat 295 /* ok */ +#define __NR_mkdirat 296 /* ok */ +#define __NR_mknodat 297 /* ok */ +#define __NR_fchownat 298 /* ok */ +#define __NR_futimesat 299 /* obsolete -> sys_utimesat */ +#define __NR_fstatat64 300 /* stat64 */ +#define __NR_unlinkat 301 /* ok */ +#define __NR_renameat 302 /* ok */ +#define __NR_linkat 303 /* ok */ +#define __NR_symlinkat 304 /* ok */ +#define __NR_readlinkat 305 /* ok */ +#define __NR_fchmodat 306 /* ok */ +#define __NR_faccessat 307 /* ok */ +#define __NR_pselect6 308 /* ok */ +#define __NR_ppoll 309 /* ok */ +#define __NR_unshare 310 /* ok */ +#define 
__NR_set_robust_list 311 /* ok */ +#define __NR_get_robust_list 312 /* ok */ +#define __NR_splice 313 /* ok */ +#define __NR_sync_file_range 314 /* ok */ +#define __NR_tee 315 /* ok */ +#define __NR_vmsplice 316 /* ok */ +#define __NR_move_pages 317 /* mmu */ +#define __NR_getcpu 318 /* ok */ +#define __NR_epoll_pwait 319 /* ok */ +#define __NR_utimensat 320 /* ok */ +#define __NR_signalfd 321 /* ok */ +#define __NR_timerfd_create 322 /* ok */ +#define __NR_eventfd 323 /* ok */ +#define __NR_fallocate 324 /* ok */ +#define __NR_semtimedop 325 /* ok - semaphore group */ +#define __NR_timerfd_settime 326 /* ok */ +#define __NR_timerfd_gettime 327 /* ok */ +/* sysv ipc syscalls */ +#define __NR_semctl 328 /* ok */ +#define __NR_semget 329 /* ok */ +#define __NR_semop 330 /* ok */ +#define __NR_msgctl 331 /* ok */ +#define __NR_msgget 332 /* ok */ +#define __NR_msgrcv 333 /* ok */ +#define __NR_msgsnd 334 /* ok */ +#define __NR_shmat 335 /* ok */ +#define __NR_shmctl 336 /* ok */ +#define __NR_shmdt 337 /* ok */ +#define __NR_shmget 338 /* ok */ + + +#define __NR_signalfd4 339 /* new */ +#define __NR_eventfd2 340 /* new */ +#define __NR_epoll_create1 341 /* new */ +#define __NR_dup3 342 /* new */ +#define __NR_pipe2 343 /* new */ +#define __NR_inotify_init1 344 /* new */ +#define __NR_socket 345 /* new */ +#define __NR_socketpair 346 /* new */ +#define __NR_bind 347 /* new */ +#define __NR_listen 348 /* new */ +#define __NR_accept 349 /* new */ +#define __NR_connect 350 /* new */ +#define __NR_getsockname 351 /* new */ +#define __NR_getpeername 352 /* new */ +#define __NR_sendto 353 /* new */ +#define __NR_send 354 /* new */ +#define __NR_recvfrom 355 /* new */ +#define __NR_recv 356 /* new */ +#define __NR_setsockopt 357 /* new */ +#define __NR_getsockopt 358 /* new */ +#define __NR_shutdown 359 /* new */ +#define __NR_sendmsg 360 /* new */ +#define __NR_recvmsg 361 /* new */ +#define __NR_accept4 362 /* new */ +#define __NR_preadv 363 /* new */ +#define __NR_pwritev 364 /* new */ +#define __NR_rt_tgsigqueueinfo 365 /* new */ +#define __NR_perf_event_open 366 /* new */ +#define __NR_recvmmsg 367 /* new */ +#define __NR_fanotify_init 368 +#define __NR_fanotify_mark 369 +#define __NR_prlimit64 370 +#define __NR_name_to_handle_at 371 +#define __NR_open_by_handle_at 372 +#define __NR_clock_adjtime 373 +#define __NR_syncfs 374 +#define __NR_setns 375 +#define __NR_sendmmsg 376 +#define __NR_process_vm_readv 377 +#define __NR_process_vm_writev 378 +#define __NR_kcmp 379 +#define __NR_finit_module 380 +#define __NR_sched_setattr 381 +#define __NR_sched_getattr 382 +#define __NR_renameat2 383 +#define __NR_seccomp 384 +#define __NR_getrandom 385 +#define __NR_memfd_create 386 +#define __NR_bpf 387 +#define __NR_execveat 388 +#define __NR_userfaultfd 389 +#define __NR_membarrier 390 +#define __NR_mlock2 391 +#define __NR_copy_file_range 392 +#define __NR_preadv2 393 +#define __NR_pwritev2 394 +#define __NR_pkey_mprotect 395 +#define __NR_pkey_alloc 396 +#define __NR_pkey_free 397 +#define __NR_statx 398 +#define __NR_io_pgetevents 399 +#define __NR_rseq 400 + +#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore new file mode 100644 index 000000000..c5f676c3c --- /dev/null +++ b/arch/microblaze/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile new file mode 100644 index 000000000..dd7163743 --- /dev/null +++ b/arch/microblaze/kernel/Makefile @@ -0,0 +1,30 
@@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile +# + +ifdef CONFIG_FUNCTION_TRACER +# Do not trace early boot code and low level code +CFLAGS_REMOVE_timer.o = -pg +CFLAGS_REMOVE_intc.o = -pg +CFLAGS_REMOVE_early_printk.o = -pg +CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_process.o = -pg +endif + +extra-y := head.o vmlinux.lds + +obj-y += dma.o exceptions.o \ + hw_exception_handler.o irq.o \ + process.o prom.o ptrace.o \ + reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o + +obj-y += cpu/ + +obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o +obj-$(CONFIG_MMU) += misc.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o +obj-$(CONFIG_KGDB) += kgdb.o + +obj-y += entry$(MMU).o diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c new file mode 100644 index 000000000..c1b459c97 --- /dev/null +++ b/arch/microblaze/kernel/asm-offsets.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/init.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/ptrace.h> +#include <linux/hardirq.h> +#include <linux/thread_info.h> +#include <linux/kbuild.h> +#include <asm/cpuinfo.h> + +int main(int argc, char *argv[]) +{ + /* struct pt_regs */ + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_MSR, offsetof(struct pt_regs, msr)); + DEFINE(PT_EAR, offsetof(struct pt_regs, ear)); + DEFINE(PT_ESR, offsetof(struct pt_regs, esr)); + DEFINE(PT_FSR, offsetof(struct pt_regs, fsr)); + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_R0, offsetof(struct pt_regs, r0)); + DEFINE(PT_R1, offsetof(struct pt_regs, r1)); + DEFINE(PT_R2, offsetof(struct pt_regs, r2)); + DEFINE(PT_R3, offsetof(struct pt_regs, r3)); + DEFINE(PT_R4, offsetof(struct pt_regs, r4)); + DEFINE(PT_R5, offsetof(struct pt_regs, r5)); + DEFINE(PT_R6, offsetof(struct pt_regs, r6)); + DEFINE(PT_R7, offsetof(struct pt_regs, r7)); + DEFINE(PT_R8, offsetof(struct pt_regs, r8)); + DEFINE(PT_R9, offsetof(struct pt_regs, r9)); + DEFINE(PT_R10, offsetof(struct pt_regs, r10)); + DEFINE(PT_R11, offsetof(struct pt_regs, r11)); + DEFINE(PT_R12, offsetof(struct pt_regs, r12)); + DEFINE(PT_R13, offsetof(struct pt_regs, r13)); + DEFINE(PT_R14, offsetof(struct pt_regs, r14)); + DEFINE(PT_R15, offsetof(struct pt_regs, r15)); + DEFINE(PT_R16, offsetof(struct pt_regs, r16)); + DEFINE(PT_R17, offsetof(struct pt_regs, r17)); + DEFINE(PT_R18, offsetof(struct pt_regs, r18)); + DEFINE(PT_R19, offsetof(struct pt_regs, r19)); + DEFINE(PT_R20, offsetof(struct pt_regs, r20)); + DEFINE(PT_R21, offsetof(struct pt_regs, r21)); + DEFINE(PT_R22, offsetof(struct pt_regs, r22)); + DEFINE(PT_R23, offsetof(struct pt_regs, r23)); + DEFINE(PT_R24, offsetof(struct pt_regs, r24)); + DEFINE(PT_R25, offsetof(struct pt_regs, r25)); + DEFINE(PT_R26, offsetof(struct pt_regs, r26)); + DEFINE(PT_R27, offsetof(struct pt_regs, r27)); + DEFINE(PT_R28, offsetof(struct pt_regs, r28)); + DEFINE(PT_R29, offsetof(struct pt_regs, r29)); + DEFINE(PT_R30, offsetof(struct pt_regs, r30)); + DEFINE(PT_R31, offsetof(struct pt_regs, r31)); + DEFINE(PT_MODE, offsetof(struct pt_regs, pt_mode)); + BLANK(); + + /* Magic offsets for PTRACE PEEK/POKE etc */ 
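+	/*
+	 * These lie just past the pt_regs image, so a debugger can fetch
+	 * them via PTRACE_PEEKUSR without clashing with any register
+	 * offset. An illustrative, hypothetical gdbserver-style lookup
+	 * (names and error handling are examples, not part of this file):
+	 *
+	 *	errno = 0;
+	 *	long text = ptrace(PTRACE_PEEKUSR, pid,
+	 *			   (void *)PT_TEXT_ADDR, NULL);
+	 *	if (text == -1 && errno != 0)
+	 *		perror("PTRACE_PEEKUSR");
+	 */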
+ DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs) + 1); + DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs) + 2); + DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs) + 3); + BLANK(); + + /* struct task_struct */ + DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); +#ifdef CONFIG_MMU + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + DEFINE(TASK_PID, offsetof(struct task_struct, pid)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); + BLANK(); + + DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); + BLANK(); +#endif + + /* struct thread_info */ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); + DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); + BLANK(); + + /* struct cpu_context */ + DEFINE(CC_R1, offsetof(struct cpu_context, r1)); /* r1 */ + DEFINE(CC_R2, offsetof(struct cpu_context, r2)); + /* dedicated registers */ + DEFINE(CC_R13, offsetof(struct cpu_context, r13)); + DEFINE(CC_R14, offsetof(struct cpu_context, r14)); + DEFINE(CC_R15, offsetof(struct cpu_context, r15)); + DEFINE(CC_R16, offsetof(struct cpu_context, r16)); + DEFINE(CC_R17, offsetof(struct cpu_context, r17)); + DEFINE(CC_R18, offsetof(struct cpu_context, r18)); + /* non-volatile registers */ + DEFINE(CC_R19, offsetof(struct cpu_context, r19)); + DEFINE(CC_R20, offsetof(struct cpu_context, r20)); + DEFINE(CC_R21, offsetof(struct cpu_context, r21)); + DEFINE(CC_R22, offsetof(struct cpu_context, r22)); + DEFINE(CC_R23, offsetof(struct cpu_context, r23)); + DEFINE(CC_R24, offsetof(struct cpu_context, r24)); + DEFINE(CC_R25, offsetof(struct cpu_context, r25)); + DEFINE(CC_R26, offsetof(struct cpu_context, r26)); + DEFINE(CC_R27, offsetof(struct cpu_context, r27)); + DEFINE(CC_R28, offsetof(struct cpu_context, r28)); + DEFINE(CC_R29, offsetof(struct cpu_context, r29)); + DEFINE(CC_R30, offsetof(struct cpu_context, r30)); + /* special purpose registers */ + DEFINE(CC_MSR, offsetof(struct cpu_context, msr)); + DEFINE(CC_EAR, offsetof(struct cpu_context, ear)); + DEFINE(CC_ESR, offsetof(struct cpu_context, esr)); + DEFINE(CC_FSR, offsetof(struct cpu_context, fsr)); + BLANK(); + + return 0; +} diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile new file mode 100644 index 000000000..059afc75a --- /dev/null +++ b/arch/microblaze/kernel/cpu/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Build the appropriate CPU version support +# + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_cache.o = -pg +endif + +ccflags-y := -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \ + -DCPU_REV=$(CPU_REV) + +obj-y += cache.o cpuinfo.o cpuinfo-pvr-full.o cpuinfo-static.o mb.o pvr.o diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c new file mode 100644 index 000000000..dcba53803 --- /dev/null +++ b/arch/microblaze/kernel/cpu/cache.c @@ -0,0 +1,656 @@ +/* + * Cache control for MicroBlaze cache memories + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * 
Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/cache.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static inline void __enable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	msrset	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	msrset	r0, %0;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __disable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
+				"nop; "			\
+			: : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
+				"nop;"			\
+				"ori	r12, r12, %0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __disable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
+				"nop;"			\
+				"andi	r12, r12, ~%0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __enable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
+				"nop;"			\
+				"ori	r12, r12, %0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory", "r12");
+}
+
+static inline void __disable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
+				"nop;"			\
+				"andi	r12, r12, ~%0;"	\
+				"mts	rmsr, r12;"	\
+				"nop;"			\
+			: : "i" (MSR_DCE) : "memory", "r12");
+}
+
+
+/* Helper macro for computing the limits of cache range loops
+ *
+ * The end address can be unaligned, which is fine for the C implementation;
+ * the ASM implementation aligns it in the ASM macros.
+ */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
+do {									\
+	int align = ~(cache_line_length - 1);				\
+	if (start < UINT_MAX - cache_size)				\
+		end = min(start + cache_size, end);			\
+	start &= align;							\
+} while (0)
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on each cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size - line_length;			\
+	int step = -line_length;					\
+	WARN_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, r0;"		\
+					"bgtid	%0, 1b;"		\
+					"addk	%0, %0, %1;"		\
+					: : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0)
+
+/* Used for wdc.flush/clear, which can take an offset in rB; that is not
+ * possible for simple wdc or wic.
+ *
+ * The start address is cache aligned. The end address is not aligned:
+ * if end is aligned, the cacheline length is subtracted, because the
+ * next cacheline must not be flushed/invalidated; if it is not aligned,
+ * it is aligned down, because the whole line is flushed/invalidated anyway.
+ */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int align = ~(line_length - 1);					\
+	int count;							\
+	end = ((end & align) == end) ? end - line_length : end & align; \
+	count = end - start;						\
+	WARN_ON(count < 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"		\
+					"bgtid	%1, 1b;"		\
+					"addk	%1, %1, %2;"		\
+					: : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0)
+
+/* Only the first parameter of OP is used - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
+do {									\
+	unsigned int volatile temp = 0;					\
+	unsigned int align = ~(line_length - 1);			\
+	end = ((end & align) == end) ? end - line_length : end & align; \
+	WARN_ON(end < start);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
+					"cmpu	%0, %1, %2;"		\
+					"bgtid	%0, 1b;"		\
+					"addk	%1, %1, %3;"		\
+				: : "r" (temp), "r" (start), "r" (end),	\
+					"r" (line_length) : "memory");	\
+} while (0)
+
+#define ASM_LOOP
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+				unsigned long end)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_range_noirq(unsigned long start,
+				unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;"	\
+					: : "r" (i));
+#endif
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;"	\
+					: : "r" (i));
+#endif
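+	/* every line has been invalidated above; turn the icache back on */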
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_noirq(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;"	\
+					: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_all_msr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;"	\
+					: : "r" (i));
+#endif
+	__enable_dcache_msr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;"	\
+					: : "r" (i));
+#endif
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_noirq_wt(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;"	\
+					: : "r" (i));
+#endif
+}
+
+/*
+ * FIXME This is a blind invalidation, as expected,
+ * but it can't be called on noMMU from microblaze_cache_init below.
+ *
+ * MS: a noMMU kernel won't boot if simple wdc is used.
+ * The likely reason is that dirty data the kernel still needs
+ * would be discarded.
+ */
+static void __invalidate_dcache_all_wb(void)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+					wdc);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;"	\
+					: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+						unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+						unsigned long end)
+{
+#ifndef ASM_LOOP
+	int i;
+#endif
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+#ifdef ASM_LOOP
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+						unsigned long end)
+{
+	unsigned long flags;
+#ifndef
ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); + + local_irq_save(flags); + __disable_dcache_msr(); + +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + + __enable_dcache_msr(); + local_irq_restore(flags); +} + +static void __invalidate_dcache_range_nomsr_irq(unsigned long start, + unsigned long end) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); + + local_irq_save(flags); + __disable_dcache_nomsr(); + +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + + __enable_dcache_nomsr(); + local_irq_restore(flags); +} + +static void __flush_dcache_all_wb(void) +{ +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, + wdc.flush); +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif +} + +static void __flush_dcache_range_wb(unsigned long start, unsigned long end) +{ +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); +#else + for (i = start; i < end; i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif +} + +/* struct for wb caches and for wt caches */ +struct scache *mbc; + +/* new wb cache model */ +static const struct scache wb_msr = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = __flush_dcache_all_wb, + .dflr = __flush_dcache_range_wb, + .din = __invalidate_dcache_all_wb, + .dinr = __invalidate_dcache_range_wb, +}; + +/* There is only difference in ie, id, de, dd functions */ +static const struct scache wb_nomsr = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_nomsr, + .dd = __disable_dcache_nomsr, + .dfl = __flush_dcache_all_wb, + .dflr = __flush_dcache_range_wb, + .din = __invalidate_dcache_all_wb, + .dinr = __invalidate_dcache_range_wb, +}; + +/* Old wt cache model with disabling irq and turn off cache */ +static const struct scache wt_msr = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_msr_irq, + .iflr = __flush_icache_range_msr_irq, + .iin = __flush_icache_all_msr_irq, + .iinr = __flush_icache_range_msr_irq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = 
__invalidate_dcache_all_msr_irq, + .dflr = __invalidate_dcache_range_msr_irq_wt, + .din = __invalidate_dcache_all_msr_irq, + .dinr = __invalidate_dcache_range_msr_irq_wt, +}; + +static const struct scache wt_nomsr = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_nomsr_irq, + .iflr = __flush_icache_range_nomsr_irq, + .iin = __flush_icache_all_nomsr_irq, + .iinr = __flush_icache_range_nomsr_irq, + .de = __enable_dcache_nomsr, + .dd = __disable_dcache_nomsr, + .dfl = __invalidate_dcache_all_nomsr_irq, + .dflr = __invalidate_dcache_range_nomsr_irq, + .din = __invalidate_dcache_all_nomsr_irq, + .dinr = __invalidate_dcache_range_nomsr_irq, +}; + +/* New wt cache model for newer Microblaze versions */ +static const struct scache wt_msr_noirq = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = __invalidate_dcache_all_noirq_wt, + .dflr = __invalidate_dcache_range_nomsr_wt, + .din = __invalidate_dcache_all_noirq_wt, + .dinr = __invalidate_dcache_range_nomsr_wt, +}; + +static const struct scache wt_nomsr_noirq = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_nomsr, + .dd = __disable_dcache_nomsr, + .dfl = __invalidate_dcache_all_noirq_wt, + .dflr = __invalidate_dcache_range_nomsr_wt, + .din = __invalidate_dcache_all_noirq_wt, + .dinr = __invalidate_dcache_range_nomsr_wt, +}; + +/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */ +#define CPUVER_7_20_A 0x0c +#define CPUVER_7_20_D 0x0f + +void microblaze_cache_init(void) +{ + if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) { + if (cpuinfo.dcache_wb) { + pr_info("wb_msr\n"); + mbc = (struct scache *)&wb_msr; + if (cpuinfo.ver_code <= CPUVER_7_20_D) { + /* MS: problem with signal handling - hw bug */ + pr_info("WB won't work properly\n"); + } + } else { + if (cpuinfo.ver_code >= CPUVER_7_20_A) { + pr_info("wt_msr_noirq\n"); + mbc = (struct scache *)&wt_msr_noirq; + } else { + pr_info("wt_msr\n"); + mbc = (struct scache *)&wt_msr; + } + } + } else { + if (cpuinfo.dcache_wb) { + pr_info("wb_nomsr\n"); + mbc = (struct scache *)&wb_nomsr; + if (cpuinfo.ver_code <= CPUVER_7_20_D) { + /* MS: problem with signal handling - hw bug */ + pr_info("WB won't work properly\n"); + } + } else { + if (cpuinfo.ver_code >= CPUVER_7_20_A) { + pr_info("wt_nomsr_noirq\n"); + mbc = (struct scache *)&wt_nomsr_noirq; + } else { + pr_info("wt_nomsr\n"); + mbc = (struct scache *)&wt_nomsr; + } + } + } + /* + * FIXME Invalidation is done in U-BOOT + * WT cache: Data is already written to main memory + * WB cache: Discard data on noMMU which caused that kernel doesn't boot + */ + /* invalidate_dcache(); */ + enable_dcache(); + + invalidate_icache(); + enable_icache(); +} diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c new file mode 100644 index 000000000..a32daec96 --- /dev/null +++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c @@ -0,0 +1,115 @@ +/* + * Support for MicroBlaze PVR (processor version register) + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams 
<john.williams@petalogix.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/init.h> +#include <linux/string.h> +#include <asm/pvr.h> +#include <asm/cpuinfo.h> + +/* + * Helper macro to map between fields in our struct cpuinfo, and + * the PVR macros in pvr.h. + */ + +#define CI(c, p) { ci->c = PVR_##p(pvr); } + +#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) +#define err_printk(x) \ + early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); +#else +#define err_printk(x) \ + pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n"); +#endif + +void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) +{ + struct pvr_s pvr; + u32 temp; /* for saving temp value */ + get_pvr(&pvr); + + CI(ver_code, VERSION); + if (!ci->ver_code) { + pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n"); + return; + } + + temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) | + PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr); + if (ci->use_instr != temp) + err_printk("BARREL, MSR, PCMP or DIV"); + ci->use_instr = temp; + + temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr); + if (ci->use_mult != temp) + err_printk("HW_MUL"); + ci->use_mult = temp; + + temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr); + if (ci->use_fpu != temp) + err_printk("HW_FPU"); + ci->use_fpu = temp; + + ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) | + PVR_UNALIGNED_EXCEPTION(pvr) | + PVR_ILL_OPCODE_EXCEPTION(pvr) | + PVR_IOPB_BUS_EXCEPTION(pvr) | + PVR_DOPB_BUS_EXCEPTION(pvr) | + PVR_DIV_ZERO_EXCEPTION(pvr) | + PVR_FPU_EXCEPTION(pvr) | + PVR_FSL_EXCEPTION(pvr); + + CI(pvr_user1, USER1); + CI(pvr_user2, USER2); + + CI(mmu, USE_MMU); + CI(mmu_privins, MMU_PRIVINS); + CI(endian, ENDIAN); + + CI(use_icache, USE_ICACHE); + CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); + CI(icache_write, ICACHE_ALLOW_WR); + ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2; + CI(icache_size, ICACHE_BYTE_SIZE); + CI(icache_base, ICACHE_BASEADDR); + CI(icache_high, ICACHE_HIGHADDR); + + CI(use_dcache, USE_DCACHE); + CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); + CI(dcache_write, DCACHE_ALLOW_WR); + ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2; + CI(dcache_size, DCACHE_BYTE_SIZE); + CI(dcache_base, DCACHE_BASEADDR); + CI(dcache_high, DCACHE_HIGHADDR); + + temp = PVR_DCACHE_USE_WRITEBACK(pvr); + if (ci->dcache_wb != temp) + err_printk("DCACHE WB"); + ci->dcache_wb = temp; + + CI(use_dopb, D_OPB); + CI(use_iopb, I_OPB); + CI(use_dlmb, D_LMB); + CI(use_ilmb, I_LMB); + CI(num_fsl, FSL_LINKS); + + CI(irq_edge, INTERRUPT_IS_EDGE); + CI(irq_positive, EDGE_IS_POSITIVE); + + CI(area_optimised, AREA_OPTIMISED); + + CI(hw_debug, DEBUG_ENABLED); + CI(num_pc_brk, NUMBER_OF_PC_BRK); + CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK); + CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK); + + CI(fpga_family_code, TARGET_FAMILY); +} diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c new file mode 100644 index 000000000..85dbda4a0 --- /dev/null +++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/string.h> +#include <asm/cpuinfo.h> +#include <asm/pvr.h> + +static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY; +static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER; + +#define err_printk(x) \ + early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n"); + +void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) +{ + u32 i = 0; + + ci->use_instr = + (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | + (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) | + (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) | + (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0); + if (CONFIG_XILINX_MICROBLAZE0_USE_BARREL) + i |= PVR0_USE_BARREL_MASK; + if (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR) + i |= PVR2_USE_MSR_INSTR; + if (CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) + i |= PVR2_USE_PCMP_INSTR; + if (CONFIG_XILINX_MICROBLAZE0_USE_DIV) + i |= PVR0_USE_DIV_MASK; + if (ci->use_instr != i) + err_printk("BARREL, MSR, PCMP or DIV"); + + ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul"); + if (ci->use_mult != CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL) + err_printk("HW_MUL"); + ci->use_mult = + (ci->use_mult > 1 ? + (PVR2_USE_MUL64_MASK | PVR0_USE_HW_MUL_MASK) : + (ci->use_mult == 1 ? PVR0_USE_HW_MUL_MASK : 0)); + + ci->use_fpu = fcpu(cpu, "xlnx,use-fpu"); + if (ci->use_fpu != CONFIG_XILINX_MICROBLAZE0_USE_FPU) + err_printk("HW_FPU"); + ci->use_fpu = (ci->use_fpu > 1 ? + (PVR2_USE_FPU2_MASK | PVR0_USE_FPU_MASK) : + (ci->use_fpu == 1 ? PVR0_USE_FPU_MASK : 0)); + + ci->use_exc = + (fcpu(cpu, "xlnx,unaligned-exceptions") ? + PVR2_UNALIGNED_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,ill-opcode-exception") ? + PVR2_ILL_OPCODE_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,iopb-bus-exception") ? + PVR2_IOPB_BUS_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,dopb-bus-exception") ? + PVR2_DOPB_BUS_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,div-zero-exception") ? + PVR2_DIV_ZERO_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) | + (fcpu(cpu, "xlnx,fsl-exception") ? 
PVR2_USE_EXTEND_FSL : 0); + + ci->use_icache = fcpu(cpu, "xlnx,use-icache"); + ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); + ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); + ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; + if (!ci->icache_line_length) { + if (fcpu(cpu, "xlnx,icache-use-fsl")) + ci->icache_line_length = 4 << 2; + else + ci->icache_line_length = 1 << 2; + } + ci->icache_size = fcpu(cpu, "i-cache-size"); + ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); + ci->icache_high = fcpu(cpu, "i-cache-highaddr"); + + ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); + ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); + ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); + ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2; + if (!ci->dcache_line_length) { + if (fcpu(cpu, "xlnx,dcache-use-fsl")) + ci->dcache_line_length = 4 << 2; + else + ci->dcache_line_length = 1 << 2; + } + ci->dcache_size = fcpu(cpu, "d-cache-size"); + ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); + ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); + ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback"); + + ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); + ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); + ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb"); + ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb"); + + ci->num_fsl = fcpu(cpu, "xlnx,fsl-links"); + ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge"); + ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive"); + ci->area_optimised = 0; + + ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled"); + ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk"); + ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk"); + ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk"); + + ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1"); + ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2"); + + ci->mmu = fcpu(cpu, "xlnx,use-mmu"); + ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr"); + ci->endian = fcpu(cpu, "xlnx,endianness"); + + ci->ver_code = 0; + ci->fpga_family_code = 0; + + /* Do various fixups based on CPU version and FPGA family strings */ + + /* Resolved the CPU version code */ + for (i = 0; cpu_ver_lookup[i].s != NULL; i++) { + if (strcmp(cpu_ver_lookup[i].s, cpu_ver_string) == 0) + ci->ver_code = cpu_ver_lookup[i].k; + } + + /* Resolved the fpga family code */ + for (i = 0; family_string_lookup[i].s != NULL; i++) { + if (strcmp(family_string_lookup[i].s, family_string) == 0) + ci->fpga_family_code = family_string_lookup[i].k; + } + + /* FIXME - mb3 and spartan2 do not exist in PVR */ + /* This is mb3 and on a non Spartan2 */ + if (ci->ver_code == 0x20 && ci->fpga_family_code != 0xf0) + /* Hardware Multiplier in use */ + ci->use_mult = 1; +} diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c new file mode 100644 index 000000000..96b3f26d1 --- /dev/null +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/init.h> +#include <asm/cpuinfo.h> +#include <asm/pvr.h> + +const struct cpu_ver_key cpu_ver_lookup[] = { + /* These key value are as per MBV field in PVR0 */ + {"5.00.a", 0x01}, + {"5.00.b", 0x02}, + {"5.00.c", 0x03}, + {"6.00.a", 0x04}, + {"6.00.b", 0x06}, + {"7.00.a", 0x05}, + {"7.00.b", 0x07}, + {"7.10.a", 0x08}, + {"7.10.b", 0x09}, + {"7.10.c", 0x0a}, + {"7.10.d", 0x0b}, + {"7.20.a", 0x0c}, + {"7.20.b", 0x0d}, + {"7.20.c", 0x0e}, + {"7.20.d", 0x0f}, + {"7.30.a", 0x10}, + {"7.30.b", 0x11}, + {"8.00.a", 0x12}, + {"8.00.b", 0x13}, + {"8.10.a", 0x14}, + {"8.20.a", 0x15}, + {"8.20.b", 0x16}, + {"8.30.a", 0x17}, + {"8.40.a", 0x18}, + {"8.40.b", 0x19}, + {"8.50.a", 0x1a}, + {"8.50.b", 0x1c}, + {"8.50.c", 0x1e}, + {"9.0", 0x1b}, + {"9.1", 0x1d}, + {"9.2", 0x1f}, + {"9.3", 0x20}, + {"9.4", 0x21}, + {"9.5", 0x22}, + {"9.6", 0x23}, + {"10.0", 0x24}, + {NULL, 0}, +}; + +/* + * FIXME Not sure if the actual key is defined by Xilinx in the PVR + */ +const struct family_string_key family_string_lookup[] = { + {"virtex2", 0x4}, + {"virtex2pro", 0x5}, + {"spartan3", 0x6}, + {"virtex4", 0x7}, + {"virtex5", 0x8}, + {"spartan3e", 0x9}, + {"spartan3a", 0xa}, + {"spartan3an", 0xb}, + {"spartan3adsp", 0xc}, + {"spartan6", 0xd}, + {"virtex6", 0xe}, + {"virtex7", 0xf}, + /* FIXME There is no key code defined for spartan2 */ + {"spartan2", 0xf0}, + {"kintex7", 0x10}, + {"artix7", 0x11}, + {"zynq7000", 0x12}, + {"UltraScale Virtex", 0x13}, + {"UltraScale Kintex", 0x14}, + {"UltraScale+ Zynq", 0x15}, + {"UltraScale+ Virtex", 0x16}, + {"UltraScale+ Kintex", 0x17}, + {"Spartan7", 0x18}, + {NULL, 0}, +}; + +struct cpuinfo cpuinfo; +static struct device_node *cpu; + +void __init setup_cpuinfo(void) +{ + cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu"); + if (!cpu) + pr_err("You don't have cpu!!!\n"); + + pr_info("%s: initialising\n", __func__); + + switch (cpu_has_pvr()) { + case 0: + pr_warn("%s: No PVR support. Using static CPU info from FDT\n", + __func__); + set_cpuinfo_static(&cpuinfo, cpu); + break; +/* FIXME I found weird behavior with MB 7.00.a/b 7.10.a + * please do not use FULL PVR with MMU */ + case 1: + pr_info("%s: Using full CPU PVR support\n", + __func__); + set_cpuinfo_static(&cpuinfo, cpu); + set_cpuinfo_pvr_full(&cpuinfo, cpu); + break; + default: + pr_warn("%s: Unsupported PVR setting\n", __func__); + set_cpuinfo_static(&cpuinfo, cpu); + } + + if (cpuinfo.mmu_privins) + pr_warn("%s: Stream instructions enabled" + " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__); +} + +void __init setup_cpuinfo_clk(void) +{ + struct clk *clk; + + clk = of_clk_get(cpu, 0); + if (IS_ERR(clk)) { + pr_err("ERROR: CPU CCF input clock not found\n"); + /* take timebase-frequency from DTS */ + cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency"); + } else { + cpuinfo.cpu_clock_freq = clk_get_rate(clk); + } + + if (!cpuinfo.cpu_clock_freq) { + pr_err("ERROR: CPU clock frequency not setup\n"); + BUG(); + } +} diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c new file mode 100644 index 000000000..9581d194d --- /dev/null +++ b/arch/microblaze/kernel/cpu/mb.c @@ -0,0 +1,158 @@ +/* + * CPU-version specific code + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006-2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/init.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <linux/cpu.h> +#include <linux/initrd.h> + +#include <linux/bug.h> +#include <asm/cpuinfo.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <asm/page.h> +#include <linux/param.h> +#include <asm/pvr.h> +#include <asm/sections.h> +#include <asm/setup.h> + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + char *fpga_family = "Unknown"; + char *cpu_ver = "Unknown"; + int i; + + /* Denormalised to get the fpga family string */ + for (i = 0; family_string_lookup[i].s != NULL; i++) { + if (cpuinfo.fpga_family_code == family_string_lookup[i].k) { + fpga_family = (char *)family_string_lookup[i].s; + break; + } + } + + /* Denormalised to get the hw version string */ + for (i = 0; cpu_ver_lookup[i].s != NULL; i++) { + if (cpuinfo.ver_code == cpu_ver_lookup[i].k) { + cpu_ver = (char *)cpu_ver_lookup[i].s; + break; + } + } + + seq_printf(m, + "CPU-Family: MicroBlaze\n" + "FPGA-Arch: %s\n" + "CPU-Ver: %s, %s endian\n" + "CPU-MHz: %d.%02d\n" + "BogoMips: %lu.%02lu\n", + fpga_family, + cpu_ver, + cpuinfo.endian ? "little" : "big", + cpuinfo.cpu_clock_freq / 1000000, + cpuinfo.cpu_clock_freq % 1000000, + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100); + + seq_printf(m, + "HW:\n Shift:\t\t%s\n" + " MSR:\t\t%s\n" + " PCMP:\t\t%s\n" + " DIV:\t\t%s\n", + (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no", + (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no", + (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no", + (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no"); + + seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu); + + seq_printf(m, + " MUL:\t\t%s\n" + " FPU:\t\t%s\n", + (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" : + (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no", + (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" : + (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no"); + + seq_printf(m, + " Exc:\t\t%s%s%s%s%s%s%s%s\n", + (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "", + (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "", + (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "", + (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "", + (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "", + (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "", + (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "", + (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : ""); + + seq_printf(m, + "Stream-insns:\t%sprivileged\n", + cpuinfo.mmu_privins ? "un" : ""); + + if (cpuinfo.use_icache) + seq_printf(m, + "Icache:\t\t%ukB\tline length:\t%dB\n", + cpuinfo.icache_size >> 10, + cpuinfo.icache_line_length); + else + seq_puts(m, "Icache:\t\tno\n"); + + if (cpuinfo.use_dcache) { + seq_printf(m, + "Dcache:\t\t%ukB\tline length:\t%dB\n", + cpuinfo.dcache_size >> 10, + cpuinfo.dcache_line_length); + seq_puts(m, "Dcache-Policy:\t"); + if (cpuinfo.dcache_wb) + seq_puts(m, "write-back\n"); + else + seq_puts(m, "write-through\n"); + } else { + seq_puts(m, "Dcache:\t\tno\n"); + } + + seq_printf(m, + "HW-Debug:\t%s\n", + cpuinfo.hw_debug ? "yes" : "no"); + + seq_printf(m, + "PVR-USR1:\t%02x\n" + "PVR-USR2:\t%08x\n", + cpuinfo.pvr_user1, + cpuinfo.pvr_user2); + + seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + int i = *pos; + + return i < NR_CPUS ? 
(void *) (i + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c new file mode 100644 index 000000000..8d0dc6db4 --- /dev/null +++ b/arch/microblaze/kernel/cpu/pvr.c @@ -0,0 +1,80 @@ +/* + * Support for MicroBlaze PVR (processor version register) + * + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <asm/exceptions.h> +#include <asm/pvr.h> + +/* + * Until we get an assembler that knows about the pvr registers, + * this horrible cruft will have to do. + * That hardcoded opcode is mfs r3, rpvrNN + */ + +#define get_single_pvr(pvrid, val) \ +{ \ + register unsigned tmp __asm__("r3"); \ + tmp = 0x0; /* Prevent warning about unused */ \ + __asm__ __volatile__ ( \ + "mfs %0, rpvr" #pvrid ";" \ + : "=r" (tmp) : : "memory"); \ + val = tmp; \ +} + +/* + * Does the CPU support the PVR register? + * return value: + * 0: no PVR + * 1: simple PVR + * 2: full PVR + * + * This must work on all CPU versions, including those before the + * PVR was even an option. + */ + +int cpu_has_pvr(void) +{ + unsigned long flags; + unsigned pvr0; + + local_save_flags(flags); + + /* PVR bit in MSR tells us if there is any support */ + if (!(flags & PVR_MSR_BIT)) + return 0; + + get_single_pvr(0, pvr0); + pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); + + if (pvr0 & PVR0_PVR_FULL_MASK) + return 1; + + /* for partial PVR use static cpuinfo */ + return 2; +} + +void get_pvr(struct pvr_s *p) +{ + get_single_pvr(0, p->pvr[0]); + get_single_pvr(1, p->pvr[1]); + get_single_pvr(2, p->pvr[2]); + get_single_pvr(3, p->pvr[3]); + get_single_pvr(4, p->pvr[4]); + get_single_pvr(5, p->pvr[5]); + get_single_pvr(6, p->pvr[6]); + get_single_pvr(7, p->pvr[7]); + get_single_pvr(8, p->pvr[8]); + get_single_pvr(9, p->pvr[9]); + get_single_pvr(10, p->pvr[10]); + get_single_pvr(11, p->pvr[11]); +} diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c new file mode 100644 index 000000000..71032cf64 --- /dev/null +++ b/arch/microblaze/kernel/dma.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009-2010 PetaLogix + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation + * + * Provide default implementations of the DMA mapping callbacks for + * directly mapped busses. 
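+ *
+ * Driver-side sketch of how these hooks are reached (illustrative,
+ * standard DMA API; dev/buf/len are placeholders):
+ *
+ *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *	... device performs DMA against 'handle' ...
+ *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
+ *
+ * Mapping for DMA_TO_DEVICE flushes the data cache over the buffer in
+ * arch_sync_dma_for_device(); DMA_FROM_DEVICE invalidates it in
+ * arch_sync_dma_for_cpu() instead.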
+ */ + +#include <linux/device.h> +#include <linux/dma-noncoherent.h> +#include <linux/gfp.h> +#include <linux/dma-debug.h> +#include <linux/export.h> +#include <linux/bug.h> +#include <asm/cacheflush.h> + +static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size, + enum dma_data_direction direction) +{ + switch (direction) { + case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: + flush_dcache_range(paddr, paddr + size); + break; + case DMA_FROM_DEVICE: + invalidate_dcache_range(paddr, paddr + size); + break; + default: + BUG(); + } +} + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_sync(dev, paddr, size, dir); +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_sync(dev, paddr, size, dir); +} + +int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs) +{ +#ifdef CONFIG_MMU + unsigned long user_count = vma_pages(vma); + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long off = vma->vm_pgoff; + unsigned long pfn; + + if (off >= count || user_count > (count - off)) + return -ENXIO; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + pfn = consistent_virt_to_pfn(cpu_addr); + return remap_pfn_range(vma, vma->vm_start, pfn + off, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +#else + return -ENXIO; +#endif +} diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S new file mode 100644 index 000000000..7e394fc2c --- /dev/null +++ b/arch/microblaze/kernel/entry-nommu.S @@ -0,0 +1,622 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
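+ *
+ * Userspace enters _user_exception through vector 0x8, roughly
+ * (illustrative; r12 carries the syscall number, r5-r10 the args):
+ *
+ *	addik	r12, r0, __NR_write	(syscall number)
+ *	addik	r5, r0, 1		(first argument: fd)
+ *	...				(r6 = buf, r7 = count)
+ *	brki	r14, 0x8		(trap; r14 = return address)
+ *	...				(result comes back in r3)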
+ */ + +#include <linux/linkage.h> +#include <asm/thread_info.h> +#include <linux/errno.h> +#include <asm/entry.h> +#include <asm/asm-offsets.h> +#include <asm/registers.h> +#include <asm/unistd.h> +#include <asm/percpu.h> +#include <asm/signal.h> + +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR + .macro disable_irq + msrclr r0, MSR_IE + .endm + + .macro enable_irq + msrset r0, MSR_IE + .endm + + .macro clear_bip + msrclr r0, MSR_BIP + .endm +#else + .macro disable_irq + mfs r11, rmsr + andi r11, r11, ~MSR_IE + mts rmsr, r11 + .endm + + .macro enable_irq + mfs r11, rmsr + ori r11, r11, MSR_IE + mts rmsr, r11 + .endm + + .macro clear_bip + mfs r11, rmsr + andi r11, r11, ~MSR_BIP + mts rmsr, r11 + .endm +#endif + +ENTRY(_interrupt) + swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ + swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ + lwi r11, r0, PER_CPU(KM) /* load mode indicator */ + beqid r11, 1f + nop + brid 2f /* jump over */ + addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */ +1: /* switch to kernel stack */ + lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ + lwi r1, r1, TS_THREAD_INFO /* get the thread info */ + /* calculate kernel stack pointer */ + addik r1, r1, THREAD_SIZE - PT_SIZE +2: + swi r11, r1, PT_MODE /* store the mode */ + lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ + swi r2, r1, PT_R2 + swi r3, r1, PT_R3 + swi r4, r1, PT_R4 + swi r5, r1, PT_R5 + swi r6, r1, PT_R6 + swi r7, r1, PT_R7 + swi r8, r1, PT_R8 + swi r9, r1, PT_R9 + swi r10, r1, PT_R10 + swi r11, r1, PT_R11 + swi r12, r1, PT_R12 + swi r13, r1, PT_R13 + swi r14, r1, PT_R14 + swi r14, r1, PT_PC + swi r15, r1, PT_R15 + swi r16, r1, PT_R16 + swi r17, r1, PT_R17 + swi r18, r1, PT_R18 + swi r19, r1, PT_R19 + swi r20, r1, PT_R20 + swi r21, r1, PT_R21 + swi r22, r1, PT_R22 + swi r23, r1, PT_R23 + swi r24, r1, PT_R24 + swi r25, r1, PT_R25 + swi r26, r1, PT_R26 + swi r27, r1, PT_R27 + swi r28, r1, PT_R28 + swi r29, r1, PT_R29 + swi r30, r1, PT_R30 + swi r31, r1, PT_R31 + /* special purpose registers */ + mfs r11, rmsr + swi r11, r1, PT_MSR + mfs r11, rear + swi r11, r1, PT_EAR + mfs r11, resr + swi r11, r1, PT_ESR + mfs r11, rfsr + swi r11, r1, PT_FSR + /* reload original stack pointer and save it */ + lwi r11, r0, PER_CPU(ENTRY_SP) + swi r11, r1, PT_R1 + /* update mode indicator we are in kernel mode */ + addik r11, r0, 1 + swi r11, r0, PER_CPU(KM) + /* restore r31 */ + lwi r31, r0, PER_CPU(CURRENT_SAVE) + /* prepare the link register, the argument and jump */ + addik r15, r0, ret_from_intr - 8 + addk r6, r0, r15 + braid do_IRQ + add r5, r0, r1 + +ret_from_intr: + lwi r11, r1, PT_MODE + bneid r11, no_intr_resched + +3: + lwi r6, r31, TS_THREAD_INFO /* get thread info */ + lwi r19, r6, TI_FLAGS /* get flags in thread info */ + /* do an extra work if any bits are set */ + + andi r11, r19, _TIF_NEED_RESCHED + beqi r11, 1f + bralid r15, schedule + nop + bri 3b +1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME + beqid r11, no_intr_resched + addk r5, r1, r0 + bralid r15, do_notify_resume + addk r6, r0, r0 + bri 3b + +no_intr_resched: + /* Disable interrupts, we are now committed to the state restore */ + disable_irq + + /* save mode indicator */ + lwi r11, r1, PT_MODE + swi r11, r0, PER_CPU(KM) + + /* save r31 */ + swi r31, r0, PER_CPU(CURRENT_SAVE) +restore_context: + /* special purpose registers */ + lwi r11, r1, PT_FSR + mts rfsr, r11 + lwi r11, r1, PT_ESR + mts resr, r11 + lwi r11, r1, PT_EAR + mts rear, r11 + lwi r11, r1, PT_MSR + mts rmsr, r11 + + lwi r31, r1, PT_R31 + lwi 
r30, r1, PT_R30 + lwi r29, r1, PT_R29 + lwi r28, r1, PT_R28 + lwi r27, r1, PT_R27 + lwi r26, r1, PT_R26 + lwi r25, r1, PT_R25 + lwi r24, r1, PT_R24 + lwi r23, r1, PT_R23 + lwi r22, r1, PT_R22 + lwi r21, r1, PT_R21 + lwi r20, r1, PT_R20 + lwi r19, r1, PT_R19 + lwi r18, r1, PT_R18 + lwi r17, r1, PT_R17 + lwi r16, r1, PT_R16 + lwi r15, r1, PT_R15 + lwi r14, r1, PT_PC + lwi r13, r1, PT_R13 + lwi r12, r1, PT_R12 + lwi r11, r1, PT_R11 + lwi r10, r1, PT_R10 + lwi r9, r1, PT_R9 + lwi r8, r1, PT_R8 + lwi r7, r1, PT_R7 + lwi r6, r1, PT_R6 + lwi r5, r1, PT_R5 + lwi r4, r1, PT_R4 + lwi r3, r1, PT_R3 + lwi r2, r1, PT_R2 + lwi r1, r1, PT_R1 + rtid r14, 0 + nop + +ENTRY(_reset) + brai 0; + +ENTRY(_user_exception) + swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ + swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ + lwi r11, r0, PER_CPU(KM) /* load mode indicator */ + beqid r11, 1f /* Already in kernel mode? */ + nop + brid 2f /* jump over */ + addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */ +1: /* Switch to kernel stack */ + lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ + lwi r1, r1, TS_THREAD_INFO /* get the thread info */ + /* calculate kernel stack pointer */ + addik r1, r1, THREAD_SIZE - PT_SIZE +2: + swi r11, r1, PT_MODE /* store the mode */ + lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ + /* save them on stack */ + swi r2, r1, PT_R2 + swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */ + swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */ + swi r5, r1, PT_R5 + swi r6, r1, PT_R6 + swi r7, r1, PT_R7 + swi r8, r1, PT_R8 + swi r9, r1, PT_R9 + swi r10, r1, PT_R10 + swi r11, r1, PT_R11 + /* r12: _always_ in clobber list; see unistd.h */ + swi r12, r1, PT_R12 + swi r13, r1, PT_R13 + /* r14: _always_ in clobber list; see unistd.h */ + swi r14, r1, PT_R14 + /* but we want to return to the next inst. */ + addik r14, r14, 0x4 + swi r14, r1, PT_PC /* increment by 4 and store in pc */ + swi r15, r1, PT_R15 + swi r16, r1, PT_R16 + swi r17, r1, PT_R17 + swi r18, r1, PT_R18 + swi r19, r1, PT_R19 + swi r20, r1, PT_R20 + swi r21, r1, PT_R21 + swi r22, r1, PT_R22 + swi r23, r1, PT_R23 + swi r24, r1, PT_R24 + swi r25, r1, PT_R25 + swi r26, r1, PT_R26 + swi r27, r1, PT_R27 + swi r28, r1, PT_R28 + swi r29, r1, PT_R29 + swi r30, r1, PT_R30 + swi r31, r1, PT_R31 + + disable_irq + nop /* make sure IE bit is in effect */ + clear_bip /* once IE is in effect it is safe to clear BIP */ + nop + + /* special purpose registers */ + mfs r11, rmsr + swi r11, r1, PT_MSR + mfs r11, rear + swi r11, r1, PT_EAR + mfs r11, resr + swi r11, r1, PT_ESR + mfs r11, rfsr + swi r11, r1, PT_FSR + /* reload original stack pointer and save it */ + lwi r11, r0, PER_CPU(ENTRY_SP) + swi r11, r1, PT_R1 + /* update mode indicator we are in kernel mode */ + addik r11, r0, 1 + swi r11, r0, PER_CPU(KM) + /* restore r31 */ + lwi r31, r0, PER_CPU(CURRENT_SAVE) + /* re-enable interrupts now we are in kernel mode */ + enable_irq + + /* See if the system call number is valid. */ + addi r11, r12, -__NR_syscalls + bgei r11, 1f /* return to user if not valid */ + /* Figure out which function to use for this system call. */ + /* Note Microblaze barrel shift is optional, so don't rely on it */ + add r12, r12, r12 /* convert num -> ptr */ + addik r30, r0, 1 /* restarts allowed */ + add r12, r12, r12 + lwi r12, r12, sys_call_table /* Get function pointer */ + addik r15, r0, ret_to_user-8 /* set return address */ + bra r12 /* Make the system call. 
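+ *	(equivalently:  r3 = sys_call_table[r12](r5, ..., r10);
+ *	the two adds above form r12 * 4 by hand because the barrel
+ *	shifter is an optional core feature)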
*/ + bri 0 /* won't reach here */ +1: + brid ret_to_user /* jump to syscall epilogue */ + addi r3, r0, -ENOSYS /* set errno in delay slot */ + +/* + * Debug traps are like a system call, but entered via brki r14, 0x60 + * All we need to do is send the SIGTRAP signal to current, ptrace and + * do_notify_resume will handle the rest + */ +ENTRY(_debug_exception) + swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ + lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ + lwi r1, r1, TS_THREAD_INFO /* get the thread info */ + addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */ + swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ + lwi r11, r0, PER_CPU(KM) /* load mode indicator */ +//save_context: + swi r11, r1, PT_MODE /* store the mode */ + lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ + /* save them on stack */ + swi r2, r1, PT_R2 + swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */ + swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */ + swi r5, r1, PT_R5 + swi r6, r1, PT_R6 + swi r7, r1, PT_R7 + swi r8, r1, PT_R8 + swi r9, r1, PT_R9 + swi r10, r1, PT_R10 + swi r11, r1, PT_R11 + /* r12: _always_ in clobber list; see unistd.h */ + swi r12, r1, PT_R12 + swi r13, r1, PT_R13 + /* r14: _always_ in clobber list; see unistd.h */ + swi r14, r1, PT_R14 + swi r14, r1, PT_PC /* Will return to interrupted instruction */ + swi r15, r1, PT_R15 + swi r16, r1, PT_R16 + swi r17, r1, PT_R17 + swi r18, r1, PT_R18 + swi r19, r1, PT_R19 + swi r20, r1, PT_R20 + swi r21, r1, PT_R21 + swi r22, r1, PT_R22 + swi r23, r1, PT_R23 + swi r24, r1, PT_R24 + swi r25, r1, PT_R25 + swi r26, r1, PT_R26 + swi r27, r1, PT_R27 + swi r28, r1, PT_R28 + swi r29, r1, PT_R29 + swi r30, r1, PT_R30 + swi r31, r1, PT_R31 + + disable_irq + nop /* make sure IE bit is in effect */ + clear_bip /* once IE is in effect it is safe to clear BIP */ + nop + + /* special purpose registers */ + mfs r11, rmsr + swi r11, r1, PT_MSR + mfs r11, rear + swi r11, r1, PT_EAR + mfs r11, resr + swi r11, r1, PT_ESR + mfs r11, rfsr + swi r11, r1, PT_FSR + /* reload original stack pointer and save it */ + lwi r11, r0, PER_CPU(ENTRY_SP) + swi r11, r1, PT_R1 + /* update mode indicator we are in kernel mode */ + addik r11, r0, 1 + swi r11, r0, PER_CPU(KM) + /* restore r31 */ + lwi r31, r0, PER_CPU(CURRENT_SAVE) + /* re-enable interrupts now we are in kernel mode */ + enable_irq + + addi r5, r0, SIGTRAP /* sending the trap signal */ + add r6, r0, r31 /* to current */ + bralid r15, send_sig + add r7, r0, r0 /* 3rd param zero */ + + addik r30, r0, 1 /* restarts allowed ??? */ + /* Restore r3/r4 to work around how ret_to_user works */ + lwi r3, r1, PT_R3 + lwi r4, r1, PT_R4 + bri ret_to_user + +ENTRY(_break) + bri 0 + +/* struct task_struct *_switch_to(struct thread_info *prev, + struct thread_info *next); */ +ENTRY(_switch_to) + /* prepare return value */ + addk r3, r0, r31 + + /* save registers in cpu_context */ + /* use r11 and r12, volatile registers, as temp register */ + addik r11, r5, TI_CPU_CONTEXT + swi r1, r11, CC_R1 + swi r2, r11, CC_R2 + /* skip volatile registers. 
+ * they are saved on stack when we jumped to _switch_to() */ + /* dedicated registers */ + swi r13, r11, CC_R13 + swi r14, r11, CC_R14 + swi r15, r11, CC_R15 + swi r16, r11, CC_R16 + swi r17, r11, CC_R17 + swi r18, r11, CC_R18 + /* save non-volatile registers */ + swi r19, r11, CC_R19 + swi r20, r11, CC_R20 + swi r21, r11, CC_R21 + swi r22, r11, CC_R22 + swi r23, r11, CC_R23 + swi r24, r11, CC_R24 + swi r25, r11, CC_R25 + swi r26, r11, CC_R26 + swi r27, r11, CC_R27 + swi r28, r11, CC_R28 + swi r29, r11, CC_R29 + swi r30, r11, CC_R30 + /* special purpose registers */ + mfs r12, rmsr + swi r12, r11, CC_MSR + mfs r12, rear + swi r12, r11, CC_EAR + mfs r12, resr + swi r12, r11, CC_ESR + mfs r12, rfsr + swi r12, r11, CC_FSR + + /* update r31, the current */ + lwi r31, r6, TI_TASK + swi r31, r0, PER_CPU(CURRENT_SAVE) + + /* get new process' cpu context and restore */ + addik r11, r6, TI_CPU_CONTEXT + + /* special purpose registers */ + lwi r12, r11, CC_FSR + mts rfsr, r12 + lwi r12, r11, CC_ESR + mts resr, r12 + lwi r12, r11, CC_EAR + mts rear, r12 + lwi r12, r11, CC_MSR + mts rmsr, r12 + /* non-volatile registers */ + lwi r30, r11, CC_R30 + lwi r29, r11, CC_R29 + lwi r28, r11, CC_R28 + lwi r27, r11, CC_R27 + lwi r26, r11, CC_R26 + lwi r25, r11, CC_R25 + lwi r24, r11, CC_R24 + lwi r23, r11, CC_R23 + lwi r22, r11, CC_R22 + lwi r21, r11, CC_R21 + lwi r20, r11, CC_R20 + lwi r19, r11, CC_R19 + /* dedicated registers */ + lwi r18, r11, CC_R18 + lwi r17, r11, CC_R17 + lwi r16, r11, CC_R16 + lwi r15, r11, CC_R15 + lwi r14, r11, CC_R14 + lwi r13, r11, CC_R13 + /* skip volatile registers */ + lwi r2, r11, CC_R2 + lwi r1, r11, CC_R1 + + rtsd r15, 8 + nop + +ENTRY(ret_from_fork) + addk r5, r0, r3 + brlid r15, schedule_tail + nop + swi r31, r1, PT_R31 /* save r31 in user context. 
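+ *	(ret_from_fork is the first code a new child thread runs:
+ *	copy_thread plants it as the saved return address, as the
+ *	MMU variant in entry.S below spells out)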
*/ + /* will soon be restored to r31 in ret_to_user */ + addk r3, r0, r0 + brid ret_to_user + nop + +ENTRY(ret_from_kernel_thread) + brlid r15, schedule_tail + addk r5, r0, r3 + brald r15, r20 + addk r5, r0, r19 + brid ret_to_user + addk r3, r0, r0 + +work_pending: + lwi r11, r1, PT_MODE + bneid r11, 2f +3: + enable_irq + andi r11, r19, _TIF_NEED_RESCHED + beqi r11, 1f + bralid r15, schedule + nop + bri 4f +1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME + beqi r11, no_work_pending + addk r5, r30, r0 + bralid r15, do_notify_resume + addik r6, r0, 1 + addk r30, r0, r0 /* no restarts from now on */ +4: + disable_irq + lwi r6, r31, TS_THREAD_INFO /* get thread info */ + lwi r19, r6, TI_FLAGS /* get flags in thread info */ + bri 3b + +ENTRY(ret_to_user) + disable_irq + + swi r4, r1, PT_R4 /* return val */ + swi r3, r1, PT_R3 /* return val */ + + lwi r6, r31, TS_THREAD_INFO /* get thread info */ + lwi r19, r6, TI_FLAGS /* get flags in thread info */ + bnei r19, work_pending /* do an extra work if any bits are set */ +no_work_pending: + disable_irq + +2: + /* save r31 */ + swi r31, r0, PER_CPU(CURRENT_SAVE) + /* save mode indicator */ + lwi r18, r1, PT_MODE + swi r18, r0, PER_CPU(KM) +//restore_context: + /* special purpose registers */ + lwi r18, r1, PT_FSR + mts rfsr, r18 + lwi r18, r1, PT_ESR + mts resr, r18 + lwi r18, r1, PT_EAR + mts rear, r18 + lwi r18, r1, PT_MSR + mts rmsr, r18 + + lwi r31, r1, PT_R31 + lwi r30, r1, PT_R30 + lwi r29, r1, PT_R29 + lwi r28, r1, PT_R28 + lwi r27, r1, PT_R27 + lwi r26, r1, PT_R26 + lwi r25, r1, PT_R25 + lwi r24, r1, PT_R24 + lwi r23, r1, PT_R23 + lwi r22, r1, PT_R22 + lwi r21, r1, PT_R21 + lwi r20, r1, PT_R20 + lwi r19, r1, PT_R19 + lwi r18, r1, PT_R18 + lwi r17, r1, PT_R17 + lwi r16, r1, PT_R16 + lwi r15, r1, PT_R15 + lwi r14, r1, PT_PC + lwi r13, r1, PT_R13 + lwi r12, r1, PT_R12 + lwi r11, r1, PT_R11 + lwi r10, r1, PT_R10 + lwi r9, r1, PT_R9 + lwi r8, r1, PT_R8 + lwi r7, r1, PT_R7 + lwi r6, r1, PT_R6 + lwi r5, r1, PT_R5 + lwi r4, r1, PT_R4 /* return val */ + lwi r3, r1, PT_R3 /* return val */ + lwi r2, r1, PT_R2 + lwi r1, r1, PT_R1 + + rtid r14, 0 + nop + +sys_rt_sigreturn_wrapper: + addk r30, r0, r0 /* no restarts for this one */ + brid sys_rt_sigreturn + addk r5, r1, r0 + + /* Interrupt vector table */ + .section .init.ivt, "ax" + .org 0x0 + brai _reset + brai _user_exception + brai _interrupt + brai _break + brai _hw_exception_handler + .org 0x60 + brai _debug_exception + +.section .rodata,"a" +#include "syscall_table.S" + +syscall_table_size=(.-sys_call_table) + +type_SYSCALL: + .ascii "SYSCALL\0" +type_IRQ: + .ascii "IRQ\0" +type_IRQ_PREEMPT: + .ascii "IRQ (PREEMPTED)\0" +type_SYSCALL_PREEMPT: + .ascii " SYSCALL (PREEMPTED)\0" + + /* + * Trap decoding for stack unwinder + * Tuples are (start addr, end addr, string) + * If return address lies on [start addr, end addr], + * unwinder displays 'string' + */ + + .align 4 +.global microblaze_trap_handlers +microblaze_trap_handlers: + /* Exact matches come first */ + .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL + .word ret_from_intr; .word ret_from_intr ; .word type_IRQ + /* Fuzzy matches go here */ + .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT + .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT + /* End of table */ + .word 0 ; .word 0 ; .word 0 diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S new file mode 100644 index 000000000..4e1b567be --- /dev/null +++ b/arch/microblaze/kernel/entry.S @@ -0,0 +1,1015 @@ +/* + * 
Low-level system-call handling, trap handlers and context-switching + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> + * Copyright (C) 2001,2002 NEC Corporation + * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + * Written by Miles Bader <miles@gnu.org> + * Heavily modified by John Williams for Microblaze + */ + +#include <linux/sys.h> +#include <linux/linkage.h> + +#include <asm/entry.h> +#include <asm/current.h> +#include <asm/processor.h> +#include <asm/exceptions.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> + +#include <asm/page.h> +#include <asm/unistd.h> + +#include <linux/errno.h> +#include <asm/signal.h> + +#undef DEBUG + +#ifdef DEBUG +/* Create space for syscalls counting. */ +.section .data +.global syscall_debug_table +.align 4 +syscall_debug_table: + .space (__NR_syscalls * 4) +#endif /* DEBUG */ + +#define C_ENTRY(name) .globl name; .align 4; name + +/* + * Various ways of setting and clearing BIP in flags reg. + * This is mucky, but necessary using microblaze version that + * allows msr ops to write to BIP + */ +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR + .macro clear_bip + msrclr r0, MSR_BIP + .endm + + .macro set_bip + msrset r0, MSR_BIP + .endm + + .macro clear_eip + msrclr r0, MSR_EIP + .endm + + .macro set_ee + msrset r0, MSR_EE + .endm + + .macro disable_irq + msrclr r0, MSR_IE + .endm + + .macro enable_irq + msrset r0, MSR_IE + .endm + + .macro set_ums + msrset r0, MSR_UMS + msrclr r0, MSR_VMS + .endm + + .macro set_vms + msrclr r0, MSR_UMS + msrset r0, MSR_VMS + .endm + + .macro clear_ums + msrclr r0, MSR_UMS + .endm + + .macro clear_vms_ums + msrclr r0, MSR_VMS | MSR_UMS + .endm +#else + .macro clear_bip + mfs r11, rmsr + andi r11, r11, ~MSR_BIP + mts rmsr, r11 + .endm + + .macro set_bip + mfs r11, rmsr + ori r11, r11, MSR_BIP + mts rmsr, r11 + .endm + + .macro clear_eip + mfs r11, rmsr + andi r11, r11, ~MSR_EIP + mts rmsr, r11 + .endm + + .macro set_ee + mfs r11, rmsr + ori r11, r11, MSR_EE + mts rmsr, r11 + .endm + + .macro disable_irq + mfs r11, rmsr + andi r11, r11, ~MSR_IE + mts rmsr, r11 + .endm + + .macro enable_irq + mfs r11, rmsr + ori r11, r11, MSR_IE + mts rmsr, r11 + .endm + + .macro set_ums + mfs r11, rmsr + ori r11, r11, MSR_VMS + andni r11, r11, MSR_UMS + mts rmsr, r11 + .endm + + .macro set_vms + mfs r11, rmsr + ori r11, r11, MSR_VMS + andni r11, r11, MSR_UMS + mts rmsr, r11 + .endm + + .macro clear_ums + mfs r11, rmsr + andni r11, r11, MSR_UMS + mts rmsr,r11 + .endm + + .macro clear_vms_ums + mfs r11, rmsr + andni r11, r11, (MSR_VMS|MSR_UMS) + mts rmsr,r11 + .endm +#endif + +/* Define how to call high-level functions. With MMU, virtual mode must be + * enabled when calling the high-level function. Clobbers R11. 
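+ * (UMS/VMS are the "save" copies of the user/virtual-mode MSR bits;
+ * the rted inside VM_ON/VM_OFF makes them take effect at its branch
+ * target.)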
+ * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL + */ + +/* turn on virtual protected mode save */ +#define VM_ON \ + set_ums; \ + rted r0, 2f; \ + nop; \ +2: + +/* turn off virtual protected mode save and user mode save*/ +#define VM_OFF \ + clear_vms_ums; \ + rted r0, TOPHYS(1f); \ + nop; \ +1: + +#define SAVE_REGS \ + swi r2, r1, PT_R2; /* Save SDA */ \ + swi r3, r1, PT_R3; \ + swi r4, r1, PT_R4; \ + swi r5, r1, PT_R5; \ + swi r6, r1, PT_R6; \ + swi r7, r1, PT_R7; \ + swi r8, r1, PT_R8; \ + swi r9, r1, PT_R9; \ + swi r10, r1, PT_R10; \ + swi r11, r1, PT_R11; /* save clobbered regs after rval */\ + swi r12, r1, PT_R12; \ + swi r13, r1, PT_R13; /* Save SDA2 */ \ + swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \ + swi r15, r1, PT_R15; /* Save LP */ \ + swi r16, r1, PT_R16; \ + swi r17, r1, PT_R17; \ + swi r18, r1, PT_R18; /* Save asm scratch reg */ \ + swi r19, r1, PT_R19; \ + swi r20, r1, PT_R20; \ + swi r21, r1, PT_R21; \ + swi r22, r1, PT_R22; \ + swi r23, r1, PT_R23; \ + swi r24, r1, PT_R24; \ + swi r25, r1, PT_R25; \ + swi r26, r1, PT_R26; \ + swi r27, r1, PT_R27; \ + swi r28, r1, PT_R28; \ + swi r29, r1, PT_R29; \ + swi r30, r1, PT_R30; \ + swi r31, r1, PT_R31; /* Save current task reg */ \ + mfs r11, rmsr; /* save MSR */ \ + swi r11, r1, PT_MSR; + +#define RESTORE_REGS_GP \ + lwi r2, r1, PT_R2; /* restore SDA */ \ + lwi r3, r1, PT_R3; \ + lwi r4, r1, PT_R4; \ + lwi r5, r1, PT_R5; \ + lwi r6, r1, PT_R6; \ + lwi r7, r1, PT_R7; \ + lwi r8, r1, PT_R8; \ + lwi r9, r1, PT_R9; \ + lwi r10, r1, PT_R10; \ + lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\ + lwi r12, r1, PT_R12; \ + lwi r13, r1, PT_R13; /* restore SDA2 */ \ + lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ + lwi r15, r1, PT_R15; /* restore LP */ \ + lwi r16, r1, PT_R16; \ + lwi r17, r1, PT_R17; \ + lwi r18, r1, PT_R18; /* restore asm scratch reg */ \ + lwi r19, r1, PT_R19; \ + lwi r20, r1, PT_R20; \ + lwi r21, r1, PT_R21; \ + lwi r22, r1, PT_R22; \ + lwi r23, r1, PT_R23; \ + lwi r24, r1, PT_R24; \ + lwi r25, r1, PT_R25; \ + lwi r26, r1, PT_R26; \ + lwi r27, r1, PT_R27; \ + lwi r28, r1, PT_R28; \ + lwi r29, r1, PT_R29; \ + lwi r30, r1, PT_R30; \ + lwi r31, r1, PT_R31; /* Restore cur task reg */ + +#define RESTORE_REGS \ + lwi r11, r1, PT_MSR; \ + mts rmsr , r11; \ + RESTORE_REGS_GP + +#define RESTORE_REGS_RTBD \ + lwi r11, r1, PT_MSR; \ + andni r11, r11, MSR_EIP; /* clear EIP */ \ + ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \ + mts rmsr , r11; \ + RESTORE_REGS_GP + +#define SAVE_STATE \ + swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ + /* See if already in kernel mode.*/ \ + mfs r1, rmsr; \ + andi r1, r1, MSR_UMS; \ + bnei r1, 1f; \ + /* Kernel-mode state save. */ \ + /* Reload kernel stack-ptr. */ \ + lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ + /* FIXME: I can add these two lines to one */ \ + /* tophys(r1,r1); */ \ + /* addik r1, r1, -PT_SIZE; */ \ + addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ + SAVE_REGS \ + brid 2f; \ + swi r1, r1, PT_MODE; \ +1: /* User-mode state save. */ \ + lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ + tophys(r1,r1); \ + lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ + /* MS these three instructions can be added to one */ \ + /* addik r1, r1, THREAD_SIZE; */ \ + /* tophys(r1,r1); */ \ + /* addik r1, r1, -PT_SIZE; */ \ + addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ + SAVE_REGS \ + lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ + swi r11, r1, PT_R1; /* Store user SP. 
*/ \ + swi r0, r1, PT_MODE; /* Was in user-mode. */ \ + /* MS: I am clearing UMS even in case when I come from kernel space */ \ + clear_ums; \ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + +.text + +/* + * User trap. + * + * System calls are handled here. + * + * Syscall protocol: + * Syscall number in r12, args in r5-r10 + * Return value in r3 + * + * Trap entered via brki instruction, so BIP bit is set, and interrupts + * are masked. This is nice, means we don't have to CLI before state save + */ +C_ENTRY(_user_exception): + swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ + addi r14, r14, 4 /* return address is 4 byte after call */ + + lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ + tophys(r1,r1); + lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ +/* calculate kernel stack pointer from task struct 8k */ + addik r1, r1, THREAD_SIZE; + tophys(r1,r1); + + addik r1, r1, -PT_SIZE; /* Make room on the stack. */ + SAVE_REGS + swi r0, r1, PT_R3 + swi r0, r1, PT_R4 + + swi r0, r1, PT_MODE; /* Was in user-mode. */ + lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); + swi r11, r1, PT_R1; /* Store user SP. */ + clear_ums; +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + /* Save away the syscall number. */ + swi r12, r1, PT_R0; + tovirt(r1,r1) + +/* where the trap should return need -8 to adjust for rtsd r15, 8*/ +/* Jump to the appropriate function for the system call number in r12 + * (r12 is not preserved), or return an error if r12 is not valid. The LP + * register should point to the location where + * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ + + /* Step into virtual mode */ + rtbd r0, 3f + nop +3: + lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ + lwi r11, r11, TI_FLAGS /* get flags in thread info */ + andi r11, r11, _TIF_WORK_SYSCALL_MASK + beqi r11, 4f + + addik r3, r0, -ENOSYS + swi r3, r1, PT_R3 + brlid r15, do_syscall_trace_enter + addik r5, r1, PT_R0 + + # do_syscall_trace_enter returns the new syscall nr. + addk r12, r0, r3 + lwi r5, r1, PT_R5; + lwi r6, r1, PT_R6; + lwi r7, r1, PT_R7; + lwi r8, r1, PT_R8; + lwi r9, r1, PT_R9; + lwi r10, r1, PT_R10; +4: +/* Jump to the appropriate function for the system call number in r12 + * (r12 is not preserved), or return an error if r12 is not valid. + * The LP register should point to the location where the called function + * should return. [note that MAKE_SYS_CALL uses label 1] */ + /* See if the system call number is valid */ + blti r12, 5f + addi r11, r12, -__NR_syscalls; + bgei r11, 5f; + /* Figure out which function to use for this system call. */ + /* Note Microblaze barrel shift is optional, so don't rely on it */ + add r12, r12, r12; /* convert num -> ptr */ + add r12, r12, r12; + addi r30, r0, 1 /* restarts allowed */ + +#ifdef DEBUG + /* Trac syscalls and stored them to syscall_debug_table */ + /* The first syscall location stores total syscall number */ + lwi r3, r0, syscall_debug_table + addi r3, r3, 1 + swi r3, r0, syscall_debug_table + lwi r3, r12, syscall_debug_table + addi r3, r3, 1 + swi r3, r12, syscall_debug_table +#endif + + # Find and jump into the syscall handler. + lwi r12, r12, sys_call_table + /* where the trap should return need -8 to adjust for rtsd r15, 8 */ + addi r15, r0, ret_from_trap-8 + bra r12 + + /* The syscall number is invalid, return an error. 
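+ *	(reached when r12 is negative or >= __NR_syscalls; in C terms
+ *	this arm is simply: return -ENOSYS;)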
*/ +5: + braid ret_from_trap + addi r3, r0, -ENOSYS; + +/* Entry point used to return from a syscall/trap */ +/* We re-enable BIP bit before state restore */ +C_ENTRY(ret_from_trap): + swi r3, r1, PT_R3 + swi r4, r1, PT_R4 + + lwi r11, r1, PT_MODE; +/* See if returning to kernel mode, if so, skip resched &c. */ + bnei r11, 2f; + /* We're returning to user mode, so check for various conditions that + * trigger rescheduling. */ + /* FIXME: Restructure all these flag checks. */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ + lwi r11, r11, TI_FLAGS; /* get flags in thread info */ + andi r11, r11, _TIF_WORK_SYSCALL_MASK + beqi r11, 1f + + brlid r15, do_syscall_trace_leave + addik r5, r1, PT_R0 +1: + /* We're returning to user mode, so check for various conditions that + * trigger rescheduling. */ + /* get thread info from current task */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; + lwi r19, r11, TI_FLAGS; /* get flags in thread info */ + andi r11, r19, _TIF_NEED_RESCHED; + beqi r11, 5f; + + bralid r15, schedule; /* Call scheduler */ + nop; /* delay slot */ + bri 1b + + /* Maybe handle a signal */ +5: + andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; + beqi r11, 4f; /* Signals to handle, handle them */ + + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ + bralid r15, do_notify_resume; /* Handle any signals */ + add r6, r30, r0; /* Arg 2: int in_syscall */ + add r30, r0, r0 /* no more restarts */ + bri 1b + +/* Finally, return to user state. */ +4: set_bip; /* Ints masked for state restore */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ + VM_OFF; + tophys(r1,r1); + RESTORE_REGS_RTBD; + addik r1, r1, PT_SIZE /* Clean up stack space. */ + lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ + bri 6f; + +/* Return to kernel state. */ +2: set_bip; /* Ints masked for state restore */ + VM_OFF; + tophys(r1,r1); + RESTORE_REGS_RTBD; + addik r1, r1, PT_SIZE /* Clean up stack space. */ + tovirt(r1,r1); +6: +TRAP_return: /* Make global symbol for debugging */ + rtbd r14, 0; /* Instructions to return from an IRQ */ + nop; + + +/* This the initial entry point for a new child thread, with an appropriate + stack in place that makes it look the the child is in the middle of an + syscall. This function is actually `returned to' from switch_thread + (copy_thread makes ret_from_fork the return address in each new thread's + saved context). */ +C_ENTRY(ret_from_fork): + bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ + add r5, r3, r0; /* switch_thread returns the prev task */ + /* ( in the delay slot ) */ + brid ret_from_trap; /* Do normal trap return */ + add r3, r0, r0; /* Child's fork call should return 0. */ + +C_ENTRY(ret_from_kernel_thread): + bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ + add r5, r3, r0; /* switch_thread returns the prev task */ + /* ( in the delay slot ) */ + brald r15, r20 /* fn was left in r20 */ + addk r5, r0, r19 /* ... 
and argument - in r19 */ + brid ret_from_trap + add r3, r0, r0 + +C_ENTRY(sys_rt_sigreturn_wrapper): + addik r30, r0, 0 /* no restarts */ + brid sys_rt_sigreturn /* Do real work */ + addik r5, r1, 0; /* add user context as 1st arg */ + +/* + * HW EXCEPTION rutine start + */ +C_ENTRY(full_exception_trap): + /* adjust exception address for privileged instruction + * for finding where is it */ + addik r17, r17, -4 + SAVE_STATE /* Save registers */ + /* PC, before IRQ/trap - this is one instruction above */ + swi r17, r1, PT_PC; + tovirt(r1,r1) + /* FIXME this can be store directly in PT_ESR reg. + * I tested it but there is a fault */ + /* where the trap should return need -8 to adjust for rtsd r15, 8 */ + addik r15, r0, ret_from_exc - 8 + mfs r6, resr + mfs r7, rfsr; /* save FSR */ + mts rfsr, r0; /* Clear sticky fsr */ + rted r0, full_exception + addik r5, r1, 0 /* parameter struct pt_regs * regs */ + +/* + * Unaligned data trap. + * + * Unaligned data trap last on 4k page is handled here. + * + * Trap entered via exception, so EE bit is set, and interrupts + * are masked. This is nice, means we don't have to CLI before state save + * + * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" + */ +C_ENTRY(unaligned_data_trap): + /* MS: I have to save r11 value and then restore it because + * set_bit, clear_eip, set_ee use r11 as temp register if MSR + * instructions are not used. We don't need to do if MSR instructions + * are used and they use r0 instead of r11. + * I am using ENTRY_SP which should be primary used only for stack + * pointer saving. */ + swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); + set_bip; /* equalize initial state for all possible entries */ + clear_eip; + set_ee; + lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); + SAVE_STATE /* Save registers.*/ + /* PC, before IRQ/trap - this is one instruction above */ + swi r17, r1, PT_PC; + tovirt(r1,r1) + /* where the trap should return need -8 to adjust for rtsd r15, 8 */ + addik r15, r0, ret_from_exc-8 + mfs r3, resr /* ESR */ + mfs r4, rear /* EAR */ + rtbd r0, _unaligned_data_exception + addik r7, r1, 0 /* parameter struct pt_regs * regs */ + +/* + * Page fault traps. + * + * If the real exception handler (from hw_exception_handler.S) didn't find + * the mapping for the process, then we're thrown here to handle such situation. + * + * Trap entered via exceptions, so EE bit is set, and interrupts + * are masked. This is nice, means we don't have to CLI before state save + * + * Build a standard exception frame for TLB Access errors. All TLB exceptions + * will bail out to this point if they can't resolve the lightweight TLB fault. 
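+ *
+ * The entry stubs below load rear (the faulting address) into r6 and
+ * resr (the error code) into r7 (0 for instruction-side faults) to
+ * match the C signature quoted next.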
+ * + * The C function called is in "arch/microblaze/mm/fault.c", declared as: + * void do_page_fault(struct pt_regs *regs, + * unsigned long address, + * unsigned long error_code) + */ +/* data and intruction trap - which is choose is resolved int fault.c */ +C_ENTRY(page_fault_data_trap): + SAVE_STATE /* Save registers.*/ + /* PC, before IRQ/trap - this is one instruction above */ + swi r17, r1, PT_PC; + tovirt(r1,r1) + /* where the trap should return need -8 to adjust for rtsd r15, 8 */ + addik r15, r0, ret_from_exc-8 + mfs r6, rear /* parameter unsigned long address */ + mfs r7, resr /* parameter unsigned long error_code */ + rted r0, do_page_fault + addik r5, r1, 0 /* parameter struct pt_regs * regs */ + +C_ENTRY(page_fault_instr_trap): + SAVE_STATE /* Save registers.*/ + /* PC, before IRQ/trap - this is one instruction above */ + swi r17, r1, PT_PC; + tovirt(r1,r1) + /* where the trap should return need -8 to adjust for rtsd r15, 8 */ + addik r15, r0, ret_from_exc-8 + mfs r6, rear /* parameter unsigned long address */ + ori r7, r0, 0 /* parameter unsigned long error_code */ + rted r0, do_page_fault + addik r5, r1, 0 /* parameter struct pt_regs * regs */ + +/* Entry point used to return from an exception. */ +C_ENTRY(ret_from_exc): + lwi r11, r1, PT_MODE; + bnei r11, 2f; /* See if returning to kernel mode, */ + /* ... if so, skip resched &c. */ + + /* We're returning to user mode, so check for various conditions that + trigger rescheduling. */ +1: + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ + lwi r19, r11, TI_FLAGS; /* get flags in thread info */ + andi r11, r19, _TIF_NEED_RESCHED; + beqi r11, 5f; + +/* Call the scheduler before returning from a syscall/trap. */ + bralid r15, schedule; /* Call scheduler */ + nop; /* delay slot */ + bri 1b + + /* Maybe handle a signal */ +5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; + beqi r11, 4f; /* Signals to handle, handle them */ + + /* + * Handle a signal return; Pending signals should be in r18. + * + * Not all registers are saved by the normal trap/interrupt entry + * points (for instance, call-saved registers (because the normal + * C-compiler calling sequence in the kernel makes sure they're + * preserved), and call-clobbered registers in the case of + * traps), but signal handlers may want to examine or change the + * complete register state. Here we save anything not saved by + * the normal entry sequence, so that it may be safely restored + * (in a possibly modified form) after do_notify_resume returns. */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ + bralid r15, do_notify_resume; /* Handle any signals */ + addi r6, r0, 0; /* Arg 2: int in_syscall */ + bri 1b + +/* Finally, return to user state. */ +4: set_bip; /* Ints masked for state restore */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ + VM_OFF; + tophys(r1,r1); + + RESTORE_REGS_RTBD; + addik r1, r1, PT_SIZE /* Clean up stack space. */ + + lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ + bri 6f; +/* Return to kernel state. */ +2: set_bip; /* Ints masked for state restore */ + VM_OFF; + tophys(r1,r1); + RESTORE_REGS_RTBD; + addik r1, r1, PT_SIZE /* Clean up stack space. */ + + tovirt(r1,r1); +6: +EXC_return: /* Make global symbol for debugging */ + rtbd r14, 0; /* Instructions to return from an IRQ */ + nop; + +/* + * HW EXCEPTION rutine end + */ + +/* + * Hardware maskable interrupts. + * + * The stack-pointer (r1) should have already been saved to the memory + * location PER_CPU(ENTRY_SP). 
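+ *
+ * In outline (illustrative): pick a stack (the task's kernel stack if
+ * we arrived from user mode), SAVE_REGS, then
+ *	do_IRQ(regs);
+ * and leave through ret_from_irq, which handles rescheduling, signal
+ * delivery and (with CONFIG_PREEMPT) kernel preemption.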
+ */ +C_ENTRY(_interrupt): +/* MS: we are in physical address */ +/* Save registers, switch to proper stack, convert SP to virtual.*/ + swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) + /* MS: See if already in kernel mode. */ + mfs r1, rmsr + nop + andi r1, r1, MSR_UMS + bnei r1, 1f + +/* Kernel-mode state save. */ + lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) + tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ + /* save registers */ +/* MS: Make room on the stack -> activation record */ + addik r1, r1, -PT_SIZE; + SAVE_REGS + brid 2f; + swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */ +1: +/* User-mode state save. */ + /* MS: get the saved current */ + lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + tophys(r1,r1); + lwi r1, r1, TS_THREAD_INFO; + addik r1, r1, THREAD_SIZE; + tophys(r1,r1); + /* save registers */ + addik r1, r1, -PT_SIZE; + SAVE_REGS + /* calculate mode */ + swi r0, r1, PT_MODE; + lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); + swi r11, r1, PT_R1; + clear_ums; +2: + lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + tovirt(r1,r1) + addik r15, r0, irq_call; +irq_call:rtbd r0, do_IRQ; + addik r5, r1, 0; + +/* MS: we are in virtual mode */ +ret_from_irq: + lwi r11, r1, PT_MODE; + bnei r11, 2f; + +1: + lwi r11, CURRENT_TASK, TS_THREAD_INFO; + lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */ + andi r11, r19, _TIF_NEED_RESCHED; + beqi r11, 5f + bralid r15, schedule; + nop; /* delay slot */ + bri 1b + + /* Maybe handle a signal */ +5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; + beqid r11, no_intr_resched +/* Handle a signal return; Pending signals should be in r18. */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ + bralid r15, do_notify_resume; /* Handle any signals */ + addi r6, r0, 0; /* Arg 2: int in_syscall */ + bri 1b + +/* Finally, return to user state. */ +no_intr_resched: + /* Disable interrupts, we are now committed to the state restore */ + disable_irq + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); + VM_OFF; + tophys(r1,r1); + RESTORE_REGS + addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ + lwi r1, r1, PT_R1 - PT_SIZE; + bri 6f; +/* MS: Return to kernel state. */ +2: +#ifdef CONFIG_PREEMPT + lwi r11, CURRENT_TASK, TS_THREAD_INFO; + /* MS: get preempt_count from thread info */ + lwi r5, r11, TI_PREEMPT_COUNT; + bgti r5, restore; + + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + beqi r5, restore /* if zero jump over */ + +preempt: + /* interrupts are off that's why I am calling preempt_chedule_irq */ + bralid r15, preempt_schedule_irq + nop + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + bnei r5, preempt /* if non zero jump to resched */ +restore: +#endif + VM_OFF /* MS: turn off MMU */ + tophys(r1,r1) + RESTORE_REGS + addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ + tovirt(r1,r1); +6: +IRQ_return: /* MS: Make global symbol for debugging */ + rtid r14, 0 + nop + +/* + * Debug trap for KGDB. 
Enter to _debug_exception by brki r16, 0x18 + * and call handling function with saved pt_regs + */ +C_ENTRY(_debug_exception): + /* BIP bit is set on entry, no interrupts can occur */ + swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) + + mfs r1, rmsr + nop + andi r1, r1, MSR_UMS + bnei r1, 1f +/* MS: Kernel-mode state save - kgdb */ + lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ + + /* BIP bit is set on entry, no interrupts can occur */ + addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; + SAVE_REGS; + /* save all regs to pt_reg structure */ + swi r0, r1, PT_R0; /* R0 must be saved too */ + swi r14, r1, PT_R14 /* rewrite saved R14 value */ + swi r16, r1, PT_PC; /* PC and r16 are the same */ + /* save special purpose registers to pt_regs */ + mfs r11, rear; + swi r11, r1, PT_EAR; + mfs r11, resr; + swi r11, r1, PT_ESR; + mfs r11, rfsr; + swi r11, r1, PT_FSR; + + /* stack pointer is in physical address at it is decrease + * by PT_SIZE but we need to get correct R1 value */ + addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE; + swi r11, r1, PT_R1 + /* MS: r31 - current pointer isn't changed */ + tovirt(r1,r1) +#ifdef CONFIG_KGDB + addi r5, r1, 0 /* pass pt_reg address as the first arg */ + addik r15, r0, dbtrap_call; /* return address */ + rtbd r0, microblaze_kgdb_break + nop; +#endif + /* MS: Place handler for brki from kernel space if KGDB is OFF. + * It is very unlikely that another brki instruction is called. */ + bri 0 + +/* MS: User-mode state save - gdb */ +1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ + tophys(r1,r1); + lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ + addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ + tophys(r1,r1); + + addik r1, r1, -PT_SIZE; /* Make room on the stack. */ + SAVE_REGS; + swi r16, r1, PT_PC; /* Save LP */ + swi r0, r1, PT_MODE; /* Was in user-mode. */ + lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); + swi r11, r1, PT_R1; /* Store user SP. */ + lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + tovirt(r1,r1) + set_vms; + addik r5, r1, 0; + addik r15, r0, dbtrap_call; +dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ + rtbd r0, sw_exception + nop + + /* MS: The first instruction for the second part of the gdb/kgdb */ + set_bip; /* Ints masked for state restore */ + lwi r11, r1, PT_MODE; + bnei r11, 2f; +/* MS: Return to user space - gdb */ +1: + /* Get current task ptr into r11 */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ + lwi r19, r11, TI_FLAGS; /* get flags in thread info */ + andi r11, r19, _TIF_NEED_RESCHED; + beqi r11, 5f; + + /* Call the scheduler before returning from a syscall/trap. */ + bralid r15, schedule; /* Call scheduler */ + nop; /* delay slot */ + bri 1b + + /* Maybe handle a signal */ +5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; + beqi r11, 4f; /* Signals to handle, handle them */ + + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ + bralid r15, do_notify_resume; /* Handle any signals */ + addi r6, r0, 0; /* Arg 2: int in_syscall */ + bri 1b + +/* Finally, return to user state. 
*/ +4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ + VM_OFF; + tophys(r1,r1); + /* MS: Restore all regs */ + RESTORE_REGS_RTBD + addik r1, r1, PT_SIZE /* Clean up stack space */ + lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ +DBTRAP_return_user: /* MS: Make global symbol for debugging */ + rtbd r16, 0; /* MS: Instructions to return from a debug trap */ + nop; + +/* MS: Return to kernel state - kgdb */ +2: VM_OFF; + tophys(r1,r1); + /* MS: Restore all regs */ + RESTORE_REGS_RTBD + lwi r14, r1, PT_R14; + lwi r16, r1, PT_PC; + addik r1, r1, PT_SIZE; /* MS: Clean up stack space */ + tovirt(r1,r1); +DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ + rtbd r16, 0; /* MS: Instructions to return from a debug trap */ + nop; + + +ENTRY(_switch_to) + /* prepare return value */ + addk r3, r0, CURRENT_TASK + + /* save registers in cpu_context */ + /* use r11 and r12, volatile registers, as temp register */ + /* give start of cpu_context for previous process */ + addik r11, r5, TI_CPU_CONTEXT + swi r1, r11, CC_R1 + swi r2, r11, CC_R2 + /* skip volatile registers. + * they are saved on stack when we jumped to _switch_to() */ + /* dedicated registers */ + swi r13, r11, CC_R13 + swi r14, r11, CC_R14 + swi r15, r11, CC_R15 + swi r16, r11, CC_R16 + swi r17, r11, CC_R17 + swi r18, r11, CC_R18 + /* save non-volatile registers */ + swi r19, r11, CC_R19 + swi r20, r11, CC_R20 + swi r21, r11, CC_R21 + swi r22, r11, CC_R22 + swi r23, r11, CC_R23 + swi r24, r11, CC_R24 + swi r25, r11, CC_R25 + swi r26, r11, CC_R26 + swi r27, r11, CC_R27 + swi r28, r11, CC_R28 + swi r29, r11, CC_R29 + swi r30, r11, CC_R30 + /* special purpose registers */ + mfs r12, rmsr + swi r12, r11, CC_MSR + mfs r12, rear + swi r12, r11, CC_EAR + mfs r12, resr + swi r12, r11, CC_ESR + mfs r12, rfsr + swi r12, r11, CC_FSR + + /* update r31, the current-give me pointer to task which will be next */ + lwi CURRENT_TASK, r6, TI_TASK + /* stored it to current_save too */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) + + /* get new process' cpu context and restore */ + /* give me start where start context of next task */ + addik r11, r6, TI_CPU_CONTEXT + + /* non-volatile registers */ + lwi r30, r11, CC_R30 + lwi r29, r11, CC_R29 + lwi r28, r11, CC_R28 + lwi r27, r11, CC_R27 + lwi r26, r11, CC_R26 + lwi r25, r11, CC_R25 + lwi r24, r11, CC_R24 + lwi r23, r11, CC_R23 + lwi r22, r11, CC_R22 + lwi r21, r11, CC_R21 + lwi r20, r11, CC_R20 + lwi r19, r11, CC_R19 + /* dedicated registers */ + lwi r18, r11, CC_R18 + lwi r17, r11, CC_R17 + lwi r16, r11, CC_R16 + lwi r15, r11, CC_R15 + lwi r14, r11, CC_R14 + lwi r13, r11, CC_R13 + /* skip volatile registers */ + lwi r2, r11, CC_R2 + lwi r1, r11, CC_R1 + + /* special purpose registers */ + lwi r12, r11, CC_FSR + mts rfsr, r12 + lwi r12, r11, CC_MSR + mts rmsr, r12 + + rtsd r15, 8 + nop + +ENTRY(_reset) + brai 0; /* Jump to reset vector */ + + /* These are compiled and loaded into high memory, then + * copied into place in mach_early_setup */ + .section .init.ivt, "ax" +#if CONFIG_MANUAL_RESET_VECTOR + .org 0x0 + brai CONFIG_MANUAL_RESET_VECTOR +#endif + .org 0x8 + brai TOPHYS(_user_exception); /* syscall handler */ + .org 0x10 + brai TOPHYS(_interrupt); /* Interrupt handler */ + .org 0x18 + brai TOPHYS(_debug_exception); /* debug trap handler */ + .org 0x20 + brai TOPHYS(_hw_exception_handler); /* HW exception handler */ + +.section .rodata,"a" +#include "syscall_table.S" + +syscall_table_size=(.-sys_call_table) + +type_SYSCALL: + .ascii "SYSCALL\0" +type_IRQ: + 
.ascii "IRQ\0" +type_IRQ_PREEMPT: + .ascii "IRQ (PREEMPTED)\0" +type_SYSCALL_PREEMPT: + .ascii " SYSCALL (PREEMPTED)\0" + + /* + * Trap decoding for stack unwinder + * Tuples are (start addr, end addr, string) + * If return address lies on [start addr, end addr], + * unwinder displays 'string' + */ + + .align 4 +.global microblaze_trap_handlers +microblaze_trap_handlers: + /* Exact matches come first */ + .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL + .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ + /* Fuzzy matches go here */ + .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT + .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT + /* End of table */ + .word 0 ; .word 0 ; .word 0 diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c new file mode 100644 index 000000000..eafff21fc --- /dev/null +++ b/arch/microblaze/kernel/exceptions.c @@ -0,0 +1,149 @@ +/* + * HW exception handling + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +/* + * This file handles the architecture-dependent parts of hardware exceptions + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/kallsyms.h> + +#include <asm/exceptions.h> +#include <asm/entry.h> /* For KM CPU var */ +#include <linux/uaccess.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <asm/current.h> +#include <asm/cacheflush.h> + +#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 +#define MICROBLAZE_IBUS_EXCEPTION 0x03 +#define MICROBLAZE_DBUS_EXCEPTION 0x04 +#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 +#define MICROBLAZE_FPU_EXCEPTION 0x06 +#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07 + +static DEFINE_SPINLOCK(die_lock); + +void die(const char *str, struct pt_regs *fp, long err) +{ + console_verbose(); + spin_lock_irq(&die_lock); + pr_warn("Oops: %s, sig: %ld\n", str, err); + show_regs(fp); + spin_unlock_irq(&die_lock); + /* do_exit() should take care of panic'ing from an interrupt + * context so we don't handle it here + */ + do_exit(err); +} + +/* for user application debugging */ +asmlinkage void sw_exception(struct pt_regs *regs) +{ + _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); + flush_dcache_range(regs->r16, regs->r16 + 0x4); + flush_icache_range(regs->r16, regs->r16 + 0x4); +} + +void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) +{ + if (kernel_mode(regs)) + die("Exception in kernel mode", regs, signr); + + force_sig_fault(signr, code, (void __user *)addr, current); +} + +asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, + int fsr, int addr) +{ +#ifdef CONFIG_MMU + addr = regs->pc; +#endif + +#if 0 + pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", + type, user_mode(regs) ? 
"user" : "kernel", fsr, + (unsigned int) regs->pc, (unsigned int) regs->esr); +#endif + + switch (type & 0x1F) { + case MICROBLAZE_ILL_OPCODE_EXCEPTION: + if (user_mode(regs)) { + pr_debug("Illegal opcode exception in user mode\n"); + _exception(SIGILL, regs, ILL_ILLOPC, addr); + return; + } + pr_warn("Illegal opcode exception in kernel mode.\n"); + die("opcode exception", regs, SIGBUS); + break; + case MICROBLAZE_IBUS_EXCEPTION: + if (user_mode(regs)) { + pr_debug("Instruction bus error exception in user mode\n"); + _exception(SIGBUS, regs, BUS_ADRERR, addr); + return; + } + pr_warn("Instruction bus error exception in kernel mode.\n"); + die("bus exception", regs, SIGBUS); + break; + case MICROBLAZE_DBUS_EXCEPTION: + if (user_mode(regs)) { + pr_debug("Data bus error exception in user mode\n"); + _exception(SIGBUS, regs, BUS_ADRERR, addr); + return; + } + pr_warn("Data bus error exception in kernel mode.\n"); + die("bus exception", regs, SIGBUS); + break; + case MICROBLAZE_DIV_ZERO_EXCEPTION: + if (user_mode(regs)) { + pr_debug("Divide by zero exception in user mode\n"); + _exception(SIGFPE, regs, FPE_INTDIV, addr); + return; + } + pr_warn("Divide by zero exception in kernel mode.\n"); + die("Divide by zero exception", regs, SIGBUS); + break; + case MICROBLAZE_FPU_EXCEPTION: + pr_debug("FPU exception\n"); + /* IEEE FP exception */ + /* I removed fsr variable and use code var for storing fsr */ + if (fsr & FSR_IO) + fsr = FPE_FLTINV; + else if (fsr & FSR_OF) + fsr = FPE_FLTOVF; + else if (fsr & FSR_UF) + fsr = FPE_FLTUND; + else if (fsr & FSR_DZ) + fsr = FPE_FLTDIV; + else if (fsr & FSR_DO) + fsr = FPE_FLTRES; + _exception(SIGFPE, regs, fsr, addr); + break; + +#ifdef CONFIG_MMU + case MICROBLAZE_PRIVILEGED_EXCEPTION: + pr_debug("Privileged exception\n"); + _exception(SIGILL, regs, ILL_PRVOPC, addr); + break; +#endif + default: + /* FIXME what to do in unexpected exception */ + pr_warn("Unexpected exception %02x PC=%08x in %s mode\n", + type, (unsigned int) addr, + kernel_mode(regs) ? "kernel" : "user"); + } + return; +} diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c new file mode 100644 index 000000000..224eea40e --- /dev/null +++ b/arch/microblaze/kernel/ftrace.c @@ -0,0 +1,222 @@ +/* + * Ftrace support for Microblaze. + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * Based on MIPS and PowerPC ftrace code + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <asm/cacheflush.h> +#include <linux/ftrace.h> + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + unsigned long old; + int faulted; + unsigned long return_hooker = (unsigned long) + &return_to_handler; + + if (unlikely(ftrace_graph_is_dead())) + return; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Protect against fault, even if it shouldn't + * happen. This tool is too much intrusive to + * ignore such a protection. 
+	 */
+	asm volatile("	1:	lwi	%0, %2, 0;"		\
+			"2:	swi	%3, %2, 0;"		\
+			"	addik	%1, r0, 0;"		\
+			"3:"					\
+			"	.section .fixup, \"ax\";"	\
+			"4:	brid	3b;"			\
+			"	addik	%1, r0, 1;"		\
+			"	.previous;"			\
+			"	.section __ex_table,\"a\";"	\
+			"	.word	1b,4b;"			\
+			"	.word	2b,4b;"			\
+			"	.previous;"			\
+			: "=&r" (old), "=r" (faulted)
+			: "r" (parent), "r" (return_hooker)
+	);
+
+	flush_dcache_range((u32)parent, (u32)parent + 4);
+	flush_icache_range((u32)parent, (u32)parent + 4);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	if (function_graph_enter(old, self_addr, 0, NULL))
+		*parent = old;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* save value to addr - it is safe to do it in asm */
+static int ftrace_modify_code(unsigned long addr, unsigned int value)
+{
+	int faulted = 0;
+
+	__asm__ __volatile__("	1:	swi	%2, %1, 0;"		\
+				"	addik	%0, r0, 0;"		\
+				"2:"					\
+				"	.section .fixup, \"ax\";"	\
+				"3:	brid	2b;"			\
+				"	addik	%0, r0, 1;"		\
+				"	.previous;"			\
+				"	.section __ex_table,\"a\";"	\
+				"	.word	1b,3b;"			\
+				"	.previous;"			\
+				: "=r" (faulted)
+				: "r" (addr), "r" (value)
+	);
+
+	if (unlikely(faulted))
+		return -EFAULT;
+
+	flush_dcache_range(addr, addr + 4);
+	flush_icache_range(addr, addr + 4);
+
+	return 0;
+}
+
+#define MICROBLAZE_NOP 0x80000000
+#define MICROBLAZE_BRI 0xb800000C
+
+static unsigned int recorded; /* whether the original instruction was saved */
+static unsigned int imm; /* saved copy of the whole imm instruction */
+
+/* There are two approaches to implementing ftrace_make_nop - see below */
+#undef USE_FTRACE_NOP
+
+#ifdef USE_FTRACE_NOP
+static unsigned int bralid; /* saved copy of the whole bralid instruction */
+#endif
+
+int ftrace_make_nop(struct module *mod,
+			struct dyn_ftrace *rec, unsigned long addr)
+{
+	/* this is the code sequence we are working with:
+	 * b000c000        imm     -16384
+	 * b9fc8e30        bralid  r15, -29136     // c0008e30 <_mcount>
+	 * 80000000        or      r0, r0, r0
+	 *
+	 * The first solution (!USE_FTRACE_NOP - the branch solution):
+	 * b000c000        bri     12 (0xC - jump past the call sequence)
+	 * b9fc8e30        bralid  r15, -29136     // c0008e30 <_mcount>
+	 * 80000000        or      r0, r0, r0
+	 * any other instruction
+	 *
+	 * The second solution (USE_FTRACE_NOP) - no jump, just NOPs:
+	 * 80000000        or      r0, r0, r0
+	 * 80000000        or      r0, r0, r0
+	 * 80000000        or      r0, r0, r0
+	 */
+	int ret = 0;
+
+	if (recorded == 0) {
+		recorded = 1;
+		imm = *(unsigned int *)rec->ip;
+		pr_debug("%s: imm:0x%x\n", __func__, imm);
+#ifdef USE_FTRACE_NOP
+		bralid = *(unsigned int *)(rec->ip + 4);
+		pr_debug("%s: bralid 0x%x\n", __func__, bralid);
+#endif /* USE_FTRACE_NOP */
+	}
+
+#ifdef USE_FTRACE_NOP
+	ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
+	ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
+#else /* USE_FTRACE_NOP */
+	ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
+#endif /* USE_FTRACE_NOP */
+	return ret;
+}
+
+/* ftrace_make_nop is expected to have been called before this function */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	int ret;
+	pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
+		__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
+	ret = ftrace_modify_code(rec->ip, imm);
+#ifdef USE_FTRACE_NOP
+	pr_debug("%s: bralid:0x%x\n", __func__, bralid);
+	ret += ftrace_modify_code(rec->ip + 4, bralid);
+#endif /* USE_FTRACE_NOP */
+	return ret;
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned int upper =
(unsigned int)func; + unsigned int lower = (unsigned int)func; + int ret = 0; + + /* create proper saving to ftrace_call poll */ + upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ + lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ + + pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", + __func__, (unsigned int)func, (unsigned int)ip, upper, lower); + + /* save upper and lower code */ + ret = ftrace_modify_code(ip, upper); + ret += ftrace_modify_code(ip + 4, lower); + + /* We just need to replace the rtsd r15, 8 with NOP */ + ret += ftrace_modify_code((unsigned long)&ftrace_caller, + MICROBLAZE_NOP); + + return ret; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +unsigned int old_jump; /* saving place for jump instruction */ + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned int ret; + unsigned long ip = (unsigned long)(&ftrace_call_graph); + + old_jump = *(unsigned int *)ip; /* save jump over instruction */ + ret = ftrace_modify_code(ip, MICROBLAZE_NOP); + + pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); + return ret; +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned int ret; + unsigned long ip = (unsigned long)(&ftrace_call_graph); + + ret = ftrace_modify_code(ip, old_jump); + + pr_debug("%s\n", __func__); + return ret; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S new file mode 100644 index 000000000..f264fdcf1 --- /dev/null +++ b/arch/microblaze/kernel/head.S @@ -0,0 +1,386 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * MMU code derived from arch/ppc/kernel/head_4xx.S: + * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
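
ftrace_update_ftrace_func above patches the call site by splitting the handler address across an imm/addik pair. A minimal sketch of that encoding, using the same opcode constants as the source (encode_call itself is a made-up helper name):

	static void encode_call(unsigned long func, unsigned int insn[2])
	{
		insn[0] = 0xb0000000 | (func >> 16);	/* imm   func_upper */
		insn[1] = 0x32800000 | (func & 0xffff);	/* addik r20, r0, func_lower */
	}

Each word is then written with ftrace_modify_code() and flushed, exactly as the function does for the two slots at ftrace_call.
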
+ */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/thread_info.h> +#include <asm/page.h> +#include <linux/of_fdt.h> /* for OF_DT_HEADER */ + +#ifdef CONFIG_MMU +#include <asm/setup.h> /* COMMAND_LINE_SIZE */ +#include <asm/mmu.h> +#include <asm/processor.h> + +.section .data +.global empty_zero_page +.align 12 +empty_zero_page: + .space PAGE_SIZE +.global swapper_pg_dir +swapper_pg_dir: + .space PAGE_SIZE + +#endif /* CONFIG_MMU */ + +.section .rodata +.align 4 +endian_check: + .word 1 + + __HEAD +ENTRY(_start) +#if CONFIG_KERNEL_BASE_ADDR == 0 + brai TOPHYS(real_start) + .org 0x100 +real_start: +#endif + + mts rmsr, r0 +/* Disable stack protection from bootloader */ + mts rslr, r0 + addi r8, r0, 0xFFFFFFFF + mts rshr, r8 +/* + * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' + * if the msrclr instruction is not enabled. We use this to detect + * if the opcode is available, by issuing msrclr and then testing the result. + * r8 == 0 - msr instructions are implemented + * r8 != 0 - msr instructions are not implemented + */ + mfs r1, rmsr + msrclr r8, 0 /* clear nothing - just read msr for test */ + cmpu r8, r8, r1 /* r1 must contain msr reg content */ + +/* r7 may point to an FDT, or there may be one linked in. + if it's in r7, we've got to save it away ASAP. + We ensure r7 points to a valid FDT, just in case the bootloader + is broken or non-existent */ + beqi r7, no_fdt_arg /* NULL pointer? don't copy */ +/* Does r7 point to a valid FDT? Load HEADER magic number */ + /* Run time Big/Little endian platform */ + /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */ + lbui r11, r0, TOPHYS(endian_check) + beqid r11, big_endian /* DO NOT break delay stop dependency */ + lw r11, r0, r7 /* Big endian load in delay slot */ + lwr r11, r0, r7 /* Little endian load */ +big_endian: + rsubi r11, r11, OF_DT_HEADER /* Check FDT header */ + beqi r11, _prepare_copy_fdt + or r7, r0, r0 /* clear R7 when not valid DTB */ + bnei r11, no_fdt_arg /* No - get out of here */ +_prepare_copy_fdt: + or r11, r0, r0 /* incremment */ + ori r4, r0, TOPHYS(_fdt_start) + ori r3, r0, (0x8000 - 4) +_copy_fdt: + lw r12, r7, r11 /* r12 = r7 + r11 */ + sw r12, r4, r11 /* addr[r4 + r11] = r12 */ + addik r11, r11, 4 /* increment counting */ + bgtid r3, _copy_fdt /* loop for all entries */ + addik r3, r3, -4 /* descrement loop */ +no_fdt_arg: + +#ifdef CONFIG_MMU + +#ifndef CONFIG_CMDLINE_BOOL +/* + * handling command line + * copy command line directly to cmd_line placed in data section. 
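
The endian_check word above is the boot code's run-time byte-order probe: store 1 as a word, then read the first byte back with lbui. The same trick in C (a sketch):

	static const unsigned int endian_check = 1;

	static int cpu_is_little_endian(void)
	{
		/* Little endian stores the low byte first, so the first byte
		 * of the word 1 reads back as 1; on big endian it reads 0. */
		return *(const unsigned char *)&endian_check == 1;
	}

In the asm, the lw in the branch delay slot runs on both paths, and the little-endian path then redoes the load with lwr before checking the FDT magic.
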
+ */ + beqid r5, skip /* Skip if NULL pointer */ + or r11, r0, r0 /* incremment */ + ori r4, r0, cmd_line /* load address of command line */ + tophys(r4,r4) /* convert to phys address */ + ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ +_copy_command_line: + /* r2=r5+r6 - r5 contain pointer to command line */ + lbu r2, r5, r11 + beqid r2, skip /* Skip if no data */ + sb r2, r4, r11 /* addr[r4+r6]= r2 */ + addik r11, r11, 1 /* increment counting */ + bgtid r3, _copy_command_line /* loop for all entries */ + addik r3, r3, -1 /* decrement loop */ + addik r5, r4, 0 /* add new space for command line */ + tovirt(r5,r5) +skip: +#endif /* CONFIG_CMDLINE_BOOL */ + +#ifdef NOT_COMPILE +/* save bram context */ + or r11, r0, r0 /* incremment */ + ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */ + ori r3, r0, (LMB_SIZE - 4) +_copy_bram: + lw r7, r0, r11 /* r7 = r0 + r6 */ + sw r7, r4, r11 /* addr[r4 + r6] = r7 */ + addik r11, r11, 4 /* increment counting */ + bgtid r3, _copy_bram /* loop for all entries */ + addik r3, r3, -4 /* descrement loop */ +#endif + /* We have to turn on the MMU right away. */ + + /* + * Set up the initial MMU state so we can do the first level of + * kernel initialization. This maps the first 16 MBytes of memory 1:1 + * virtual to physical. + */ + nop + addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */ +_invalidate: + mts rtlbx, r3 + mts rtlbhi, r0 /* flush: ensure V is clear */ + mts rtlblo, r0 + bgtid r3, _invalidate /* loop for all entries */ + addik r3, r3, -1 + /* sync */ + + /* Setup the kernel PID */ + mts rpid,r0 /* Load the kernel PID */ + nop + bri 4 + + /* + * We should still be executing code at physical address area + * RAM_BASEADDR at this point. However, kernel code is at + * a virtual address. So, set up a TLB mapping to cover this once + * translation is enabled. 
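
The _copy_command_line loop above copies at most COMMAND_LINE_SIZE - 1 bytes and stops at the terminating NUL; because the store sits in the branch delay slot, the NUL itself still gets copied. The same logic in C (a sketch):

	static void copy_cmdline(char *dst, const char *src)
	{
		int i;

		for (i = 0; i < COMMAND_LINE_SIZE - 1; i++) {
			dst[i] = src[i];	/* delay slot: NUL is stored too */
			if (!src[i])
				break;
		}
	}
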
+ */ + + addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ + tophys(r4,r3) /* Load the kernel physical address */ + + /* start to do TLB calculation */ + addik r12, r0, _end + rsub r12, r3, r12 + addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */ + + or r9, r0, r0 /* TLB0 = 0 */ + or r10, r0, r0 /* TLB1 = 0 */ + + addik r11, r12, -0x1000000 + bgei r11, GT16 /* size is greater than 16MB */ + addik r11, r12, -0x0800000 + bgei r11, GT8 /* size is greater than 8MB */ + addik r11, r12, -0x0400000 + bgei r11, GT4 /* size is greater than 4MB */ + /* size is less than 4MB */ + addik r11, r12, -0x0200000 + bgei r11, GT2 /* size is greater than 2MB */ + addik r9, r0, 0x0100000 /* TLB0 must be 1MB */ + addik r11, r12, -0x0100000 + bgei r11, GT1 /* size is greater than 1MB */ + /* TLB1 is 0 which is setup above */ + bri tlb_end +GT4: /* r11 contains the rest - will be either 1 or 4 */ + ori r9, r0, 0x400000 /* TLB0 is 4MB */ + bri TLB1 +GT16: /* TLB0 is 16MB */ + addik r9, r0, 0x1000000 /* means TLB0 is 16MB */ +TLB1: + /* must be used r2 because of subtract if failed */ + addik r2, r11, -0x0400000 + bgei r2, GT20 /* size is greater than 16MB */ + /* size is >16MB and <20MB */ + addik r11, r11, -0x0100000 + bgei r11, GT17 /* size is greater than 17MB */ + /* kernel is >16MB and < 17MB */ +GT1: + addik r10, r0, 0x0100000 /* means TLB1 is 1MB */ + bri tlb_end +GT2: /* TLB0 is 0 and TLB1 will be 4MB */ +GT17: /* TLB1 is 4MB - kernel size <20MB */ + addik r10, r0, 0x0400000 /* means TLB1 is 4MB */ + bri tlb_end +GT8: /* TLB0 is still zero that's why I can use only TLB1 */ +GT20: /* TLB1 is 16MB - kernel size >20MB */ + addik r10, r0, 0x1000000 /* means TLB1 is 16MB */ +tlb_end: + + /* + * Configure and load two entries into TLB slots 0 and 1. + * In case we are pinning TLBs, these are reserved in by the + * other TLB functions. If not reserving, then it doesn't + * matter where they are loaded. 
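
The branch cascade above (GT16/GT8/GT4/GT2/GT1) picks sizes for the two pinned TLB entries from the padded kernel image size. Rewritten as C for readability (a sketch; pick_boot_tlbs is a made-up name and MB stands for 0x100000):

	static void pick_boot_tlbs(unsigned long size,
				   unsigned long *tlb0, unsigned long *tlb1)
	{
		if (size >= 16 * MB) {
			unsigned long rest = size - 16 * MB;

			*tlb0 = 16 * MB;
			*tlb1 = rest >= 4 * MB ? 16 * MB :
				rest >= 1 * MB ?  4 * MB : 1 * MB;
		} else if (size >= 8 * MB) {
			*tlb0 = 0;
			*tlb1 = 16 * MB;	/* one oversized entry */
		} else if (size >= 4 * MB) {
			*tlb0 = 4 * MB;
			*tlb1 = size - 4 * MB >= 1 * MB ? 4 * MB : 1 * MB;
		} else if (size >= 2 * MB) {
			*tlb0 = 0;
			*tlb1 = 4 * MB;
		} else {
			*tlb0 = 1 * MB;
			*tlb1 = size >= 1 * MB ? 1 * MB : 0;
		}

		if (!*tlb0) {		/* TLB0 is always used: promote TLB1 */
			*tlb0 = *tlb1;
			*tlb1 = 0;
		}
	}

The final swap mirrors the tlb0_not_zero fixup a few lines below, which moves TLB1's size into TLB0 whenever the cascade left TLB0 at zero.
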
+ */ + andi r4,r4,0xfffffc00 /* Mask off the real page number */ + ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ + + /* + * TLB0 is always used - check if is not zero (r9 stores TLB0 value) + * if is use TLB1 value and clear it (r10 stores TLB1 value) + */ + bnei r9, tlb0_not_zero + add r9, r10, r0 + add r10, r0, r0 +tlb0_not_zero: + + /* look at the code below */ + ori r30, r0, 0x200 + andi r29, r9, 0x100000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r9, 0x400000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r9, 0x1000000 + bneid r29, 1f + addik r30, r30, 0x80 +1: + andi r3,r3,0xfffffc00 /* Mask off the effective page number */ + ori r3,r3,(TLB_VALID) + or r3, r3, r30 + + /* Load tlb_skip size value which is index to first unused TLB entry */ + lwi r11, r0, TOPHYS(tlb_skip) + mts rtlbx,r11 /* TLB slow 0 */ + + mts rtlblo,r4 /* Load the data portion of the entry */ + mts rtlbhi,r3 /* Load the tag portion of the entry */ + + /* Increase tlb_skip size */ + addik r11, r11, 1 + swi r11, r0, TOPHYS(tlb_skip) + + /* TLB1 can be zeroes that's why we not setup it */ + beqi r10, jump_over2 + + /* look at the code below */ + ori r30, r0, 0x200 + andi r29, r10, 0x100000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r10, 0x400000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r10, 0x1000000 + bneid r29, 1f + addik r30, r30, 0x80 +1: + addk r4, r4, r9 /* previous addr + TLB0 size */ + addk r3, r3, r9 + + andi r3,r3,0xfffffc00 /* Mask off the effective page number */ + ori r3,r3,(TLB_VALID) + or r3, r3, r30 + + lwi r11, r0, TOPHYS(tlb_skip) + mts rtlbx, r11 /* r11 is used from TLB0 */ + + mts rtlblo,r4 /* Load the data portion of the entry */ + mts rtlbhi,r3 /* Load the tag portion of the entry */ + + /* Increase tlb_skip size */ + addik r11, r11, 1 + swi r11, r0, TOPHYS(tlb_skip) + +jump_over2: + /* + * Load a TLB entry for LMB, since we need access to + * the exception vectors, using a 4k real==virtual mapping. + */ + /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ + ori r11, r0, MICROBLAZE_LMB_TLB_ID + mts rtlbx,r11 + + ori r4,r0,(TLB_WR | TLB_EX) + ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) + + mts rtlblo,r4 /* Load the data portion of the entry */ + mts rtlbhi,r3 /* Load the tag portion of the entry */ + + /* + * We now have the lower 16 Meg of RAM mapped into TLB entries, and the + * caches ready to work. + */ +turn_on_mmu: + ori r15,r0,start_here + ori r4,r0,MSR_KERNEL_VMS + mts rmsr,r4 + nop + rted r15,0 /* enables MMU */ + nop + +start_here: +#endif /* CONFIG_MMU */ + + /* Initialize small data anchors */ + addik r13, r0, _KERNEL_SDA_BASE_ + addik r2, r0, _KERNEL_SDA2_BASE_ + + /* Initialize stack pointer */ + addik r1, r0, init_thread_union + THREAD_SIZE - 4 + + /* Initialize r31 with current task address */ + addik r31, r0, init_task + + addik r11, r0, machine_early_init + brald r15, r11 + nop + +#ifndef CONFIG_MMU + addik r15, r0, machine_halt + braid start_kernel + nop +#else + /* + * Initialize the MMU. + */ + bralid r15, mmu_init + nop + + /* Go back to running unmapped so we can load up new values + * and change to using our exception vectors. + * On the MicroBlaze, all we invalidate the used TLB entries to clear + * the old 16M byte TLB mappings. 
+ */ + ori r15,r0,TOPHYS(kernel_load_context) + ori r4,r0,MSR_KERNEL + mts rmsr,r4 + nop + bri 4 + rted r15,0 + nop + + /* Load up the kernel context */ +kernel_load_context: + ori r5, r0, MICROBLAZE_LMB_TLB_ID + mts rtlbx,r5 + nop + mts rtlbhi,r0 + nop + addi r15, r0, machine_halt + ori r17, r0, start_kernel + ori r4, r0, MSR_KERNEL_VMS + mts rmsr, r4 + nop + rted r17, 0 /* enable MMU and jump to start_kernel */ + nop +#endif /* CONFIG_MMU */ diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S new file mode 100644 index 000000000..0b11a4469 --- /dev/null +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -0,0 +1,1225 @@ +/* + * Exception handling for Microblaze + * + * Rewriten interrupt handling + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * + * uClinux customisation (C) 2005 John Williams + * + * MMU code derived from arch/ppc/kernel/head_4xx.S: + * Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org> + * Initial PowerPC version. + * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> + * Rewritten for PReP + * Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au> + * Low-level exception handers, MMU support, and rewrite. + * Copyright (C) 1997 Dan Malek <dmalek@jlc.net> + * PowerPC 8xx modifications. + * Copyright (C) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu> + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * + * Original code + * Copyright (C) 2004 Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +/* + * Here are the handlers which don't require enabling translation + * and calling other kernel code thus we can keep their design very simple + * and do all processing in real mode. All what they need is a valid current + * (that is an issue for the CONFIG_REGISTER_TASK_PTR case) + * This handlers use r3,r4,r5,r6 and optionally r[current] to work therefore + * these registers are saved/restored + * The handlers which require translation are in entry.S --KAA + * + * Microblaze HW Exception Handler + * - Non self-modifying exception handler for the following exception conditions + * - Unalignment + * - Instruction bus error + * - Data bus error + * - Illegal instruction opcode + * - Divide-by-zero + * + * - Privileged instruction exception (MMU) + * - Data storage exception (MMU) + * - Instruction storage exception (MMU) + * - Data TLB miss exception (MMU) + * - Instruction TLB miss exception (MMU) + * + * Note we disable interrupts during exception handling, otherwise we will + * possibly get multiple re-entrancy if interrupt handles themselves cause + * exceptions. 
JW + */ + +#include <asm/exceptions.h> +#include <asm/unistd.h> +#include <asm/page.h> + +#include <asm/entry.h> +#include <asm/current.h> +#include <linux/linkage.h> + +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/signal.h> +#include <asm/registers.h> +#include <asm/asm-offsets.h> + +#undef DEBUG + +/* Helpful Macros */ +#define NUM_TO_REG(num) r ## num + +#ifdef CONFIG_MMU + #define RESTORE_STATE \ + lwi r5, r1, 0; \ + mts rmsr, r5; \ + nop; \ + lwi r3, r1, PT_R3; \ + lwi r4, r1, PT_R4; \ + lwi r5, r1, PT_R5; \ + lwi r6, r1, PT_R6; \ + lwi r11, r1, PT_R11; \ + lwi r31, r1, PT_R31; \ + lwi r1, r1, PT_R1; +#endif /* CONFIG_MMU */ + +#define LWREG_NOP \ + bri ex_handler_unhandled; \ + nop; + +#define SWREG_NOP \ + bri ex_handler_unhandled; \ + nop; + +/* FIXME this is weird - for noMMU kernel is not possible to use brid + * instruction which can shorten executed time + */ + +/* r3 is the source */ +#define R3_TO_LWREG_V(regnum) \ + swi r3, r1, 4 * regnum; \ + bri ex_handler_done; + +/* r3 is the source */ +#define R3_TO_LWREG(regnum) \ + or NUM_TO_REG (regnum), r0, r3; \ + bri ex_handler_done; + +/* r3 is the target */ +#define SWREG_TO_R3_V(regnum) \ + lwi r3, r1, 4 * regnum; \ + bri ex_sw_tail; + +/* r3 is the target */ +#define SWREG_TO_R3(regnum) \ + or r3, r0, NUM_TO_REG (regnum); \ + bri ex_sw_tail; + +#ifdef CONFIG_MMU + #define R3_TO_LWREG_VM_V(regnum) \ + brid ex_lw_end_vm; \ + swi r3, r7, 4 * regnum; + + #define R3_TO_LWREG_VM(regnum) \ + brid ex_lw_end_vm; \ + or NUM_TO_REG (regnum), r0, r3; + + #define SWREG_TO_R3_VM_V(regnum) \ + brid ex_sw_tail_vm; \ + lwi r3, r7, 4 * regnum; + + #define SWREG_TO_R3_VM(regnum) \ + brid ex_sw_tail_vm; \ + or r3, r0, NUM_TO_REG (regnum); + + /* Shift right instruction depending on available configuration */ + #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0 + /* Only the used shift constants defined here - add more if needed */ + #define BSRLI2(rD, rA) \ + srl rD, rA; /* << 1 */ \ + srl rD, rD; /* << 2 */ + #define BSRLI4(rD, rA) \ + BSRLI2(rD, rA); \ + BSRLI2(rD, rD) + #define BSRLI10(rD, rA) \ + srl rD, rA; /* << 1 */ \ + srl rD, rD; /* << 2 */ \ + srl rD, rD; /* << 3 */ \ + srl rD, rD; /* << 4 */ \ + srl rD, rD; /* << 5 */ \ + srl rD, rD; /* << 6 */ \ + srl rD, rD; /* << 7 */ \ + srl rD, rD; /* << 8 */ \ + srl rD, rD; /* << 9 */ \ + srl rD, rD /* << 10 */ + #define BSRLI20(rD, rA) \ + BSRLI10(rD, rA); \ + BSRLI10(rD, rD) + + .macro bsrli, rD, rA, IMM + .if (\IMM) == 2 + BSRLI2(\rD, \rA) + .elseif (\IMM) == 10 + BSRLI10(\rD, \rA) + .elseif (\IMM) == 12 + BSRLI2(\rD, \rA) + BSRLI10(\rD, \rD) + .elseif (\IMM) == 14 + BSRLI4(\rD, \rA) + BSRLI10(\rD, \rD) + .elseif (\IMM) == 20 + BSRLI20(\rD, \rA) + .elseif (\IMM) == 24 + BSRLI4(\rD, \rA) + BSRLI20(\rD, \rD) + .elseif (\IMM) == 28 + BSRLI4(\rD, \rA) + BSRLI4(\rD, \rD) + BSRLI20(\rD, \rD) + .else + .error "BSRLI shift macros \IMM" + .endif + .endm + #endif + +#endif /* CONFIG_MMU */ + +.extern other_exception_handler /* Defined in exception.c */ + +/* + * hw_exception_handler - Handler for exceptions + * + * Exception handler notes: + * - Handles all exceptions + * - Does not handle unaligned exceptions during load into r17, r1, r0. 
+ * - Does not handle unaligned exceptions during store from r17 (cannot be
+ *   done) and r1 (slows down the common case)
+ *
+ *  Relevant register structures
+ *
+ *  EAR - |----|----|----|----|----|----|----|----|
+ *      - <  ##   32 bit faulting address     ##  >
+ *
+ *  ESR - |----|----|----|----|----| - | - |-----|-----|
+ *      -                            W   S   REG   EXC
+ *
+ *
+ * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
+ * ----------------------------------------
+ *
+ *      +-------------+          + 0
+ *      |     MSR     |
+ *      +-------------+          + 4
+ *      |     r1      |
+ *      |      .      |
+ *      |      .      |
+ *      |      .      |
+ *      |      .      |
+ *      |     r18     |
+ *      +-------------+          + 76
+ *      |      .      |
+ *      |      .      |
+ *
+ * The MMU kernel stores the register values in the dedicated
+ * 'pt_pool_space' area instead. The noMMU style kept them on the stack,
+ * but after a failure the register contents were lost; with the pool the
+ * values can always be inspected at a fixed place in memory. Compared to
+ * the previous solution the speed should be the same.
+ *
+ * Exception handling also differs between the MMU and noMMU kernels.
+ * The MMU handler dispatches through a jump table, which suits it better
+ * because the MMU-related exceptions are handled by asm code in this
+ * file; on noMMU, everything except the unaligned exception is handled
+ * by C code.
+ */
+
+/*
+ * Each of these handlers is entered with R3/4/5/6/11/current saved on
+ * the stack and then clobbered, so they must be restored before
+ * returning from the exception
+ */
+
+/* wrappers to restore state before coming to entry.S */
+#ifdef CONFIG_MMU
+.section .data
+.align 4
+pt_pool_space:
+	.space	PT_SIZE
+
+#ifdef DEBUG
+/* Create space for exception counting. */
+.section .data
+.global exception_debug_table
+.align 4
+exception_debug_table:
+	/* Look at exception vector table.
There is 32 exceptions * word size */ + .space (32 * 4) +#endif /* DEBUG */ + +.section .rodata +.align 4 +_MB_HW_ExceptionVectorTable: +/* 0 - Undefined */ + .long TOPHYS(ex_handler_unhandled) +/* 1 - Unaligned data access exception */ + .long TOPHYS(handle_unaligned_ex) +/* 2 - Illegal op-code exception */ + .long TOPHYS(full_exception_trapw) +/* 3 - Instruction bus error exception */ + .long TOPHYS(full_exception_trapw) +/* 4 - Data bus error exception */ + .long TOPHYS(full_exception_trapw) +/* 5 - Divide by zero exception */ + .long TOPHYS(full_exception_trapw) +/* 6 - Floating point unit exception */ + .long TOPHYS(full_exception_trapw) +/* 7 - Privileged instruction exception */ + .long TOPHYS(full_exception_trapw) +/* 8 - 15 - Undefined */ + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) +/* 16 - Data storage exception */ + .long TOPHYS(handle_data_storage_exception) +/* 17 - Instruction storage exception */ + .long TOPHYS(handle_instruction_storage_exception) +/* 18 - Data TLB miss exception */ + .long TOPHYS(handle_data_tlb_miss_exception) +/* 19 - Instruction TLB miss exception */ + .long TOPHYS(handle_instruction_tlb_miss_exception) +/* 20 - 31 - Undefined */ + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) + .long TOPHYS(ex_handler_unhandled) +#endif + +.global _hw_exception_handler +.section .text +.align 4 +.ent _hw_exception_handler +_hw_exception_handler: +#ifndef CONFIG_MMU + addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ +#else + swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */ + /* Save date to kernel memory. Here is the problem + * when you came from user space */ + ori r1, r0, TOPHYS(pt_pool_space); +#endif + swi r3, r1, PT_R3 + swi r4, r1, PT_R4 + swi r5, r1, PT_R5 + swi r6, r1, PT_R6 + +#ifdef CONFIG_MMU + swi r11, r1, PT_R11 + swi r31, r1, PT_R31 + lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */ +#endif + + mfs r5, rmsr; + nop + swi r5, r1, 0; + mfs r4, resr + nop + mfs r3, rear; + nop + +#ifndef CONFIG_MMU + andi r5, r4, 0x1000; /* Check ESR[DS] */ + beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ + mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ + nop +not_in_delay_slot: + swi r17, r1, PT_R17 +#endif + + andi r5, r4, 0x1F; /* Extract ESR[EXC] */ + +#ifdef CONFIG_MMU + /* Calculate exception vector offset = r5 << 2 */ + addk r6, r5, r5; /* << 1 */ + addk r6, r6, r6; /* << 2 */ + +#ifdef DEBUG +/* counting which exception happen */ + lwi r5, r0, TOPHYS(exception_debug_table) + addi r5, r5, 1 + swi r5, r0, TOPHYS(exception_debug_table) + lwi r5, r6, TOPHYS(exception_debug_table) + addi r5, r5, 1 + swi r5, r6, TOPHYS(exception_debug_table) +#endif +/* end */ + /* Load the HW Exception vector */ + lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) + bra r6 + +full_exception_trapw: + RESTORE_STATE + bri full_exception_trap +#else + /* Exceptions enabled here. 
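
The dispatch in the MMU path above is a classic jump table: ESR[EXC] is masked to five bits, doubled twice to form a word offset, and the physical handler address is fetched from _MB_HW_ExceptionVectorTable. The C shape of it (a sketch; the real table holds TOPHYS addresses and is branched to, not called):

	extern void (*_MB_HW_ExceptionVectorTable[32])(void);

	static void dispatch_hw_exception(unsigned int esr)
	{
		unsigned int cause = esr & 0x1f;	/* ESR[EXC] */

		/* addk r6,r5,r5 twice == cause << 2, i.e. array indexing */
		_MB_HW_ExceptionVectorTable[cause]();
	}
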
This will allow nested exceptions */ + mfs r6, rmsr; + nop + swi r6, r1, 0; /* RMSR_OFFSET */ + ori r6, r6, 0x100; /* Turn ON the EE bit */ + andi r6, r6, ~2; /* Disable interrupts */ + mts rmsr, r6; + nop + + xori r6, r5, 1; /* 00001 = Unaligned Exception */ + /* Jump to unalignment exception handler */ + beqi r6, handle_unaligned_ex; + +handle_other_ex: /* Handle Other exceptions here */ + /* Save other volatiles before we make procedure calls below */ + swi r7, r1, PT_R7 + swi r8, r1, PT_R8 + swi r9, r1, PT_R9 + swi r10, r1, PT_R10 + swi r11, r1, PT_R11 + swi r12, r1, PT_R12 + swi r14, r1, PT_R14 + swi r15, r1, PT_R15 + swi r18, r1, PT_R18 + + or r5, r1, r0 + andi r6, r4, 0x1F; /* Load ESR[EC] */ + lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ + swi r7, r1, PT_MODE + mfs r7, rfsr + nop + addk r8, r17, r0; /* Load exception address */ + bralid r15, full_exception; /* Branch to the handler */ + nop; + mts rfsr, r0; /* Clear sticky fsr */ + nop + + /* + * Trigger execution of the signal handler by enabling + * interrupts and calling an invalid syscall. + */ + mfs r5, rmsr; + nop + ori r5, r5, 2; + mts rmsr, r5; /* enable interrupt */ + nop + addi r12, r0, __NR_syscalls; + brki r14, 0x08; + mfs r5, rmsr; /* disable interrupt */ + nop + andi r5, r5, ~2; + mts rmsr, r5; + nop + + lwi r7, r1, PT_R7 + lwi r8, r1, PT_R8 + lwi r9, r1, PT_R9 + lwi r10, r1, PT_R10 + lwi r11, r1, PT_R11 + lwi r12, r1, PT_R12 + lwi r14, r1, PT_R14 + lwi r15, r1, PT_R15 + lwi r18, r1, PT_R18 + + bri ex_handler_done; /* Complete exception handling */ +#endif + +/* 0x01 - Unaligned data access exception + * This occurs when a word access is not aligned on a word boundary, + * or when a 16-bit access is not aligned on a 16-bit boundary. + * This handler perform the access, and returns, except for MMU when + * the unaligned address is last on a 4k page or the physical address is + * not found in the page table, in which case unaligned_data_trap is called. 
+ */ +handle_unaligned_ex: + /* Working registers already saved: R3, R4, R5, R6 + * R4 = ESR + * R3 = EAR + */ +#ifdef CONFIG_MMU + andi r6, r4, 0x1000 /* Check ESR[DS] */ + beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ + mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ + nop +_no_delayslot: + /* jump to high level unaligned handler */ + RESTORE_STATE; + bri unaligned_data_trap +#endif + andi r6, r4, 0x3E0; /* Mask and extract the register operand */ + srl r6, r6; /* r6 >> 5 */ + srl r6, r6; + srl r6, r6; + srl r6, r6; + srl r6, r6; + /* Store the register operand in a temporary location */ + sbi r6, r0, TOPHYS(ex_reg_op); + + andi r6, r4, 0x400; /* Extract ESR[S] */ + bnei r6, ex_sw; +ex_lw: + andi r6, r4, 0x800; /* Extract ESR[W] */ + beqi r6, ex_lhw; + lbui r5, r3, 0; /* Exception address in r3 */ + /* Load a word, byte-by-byte from destination address + and save it in tmp space */ + sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); + lbui r5, r3, 1; + sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); + lbui r5, r3, 2; + sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); + lbui r5, r3, 3; + sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); + /* Get the destination register value into r4 */ + lwi r4, r0, TOPHYS(ex_tmp_data_loc_0); + bri ex_lw_tail; +ex_lhw: + lbui r5, r3, 0; /* Exception address in r3 */ + /* Load a half-word, byte-by-byte from destination + address and save it in tmp space */ + sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); + lbui r5, r3, 1; + sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); + /* Get the destination register value into r4 */ + lhui r4, r0, TOPHYS(ex_tmp_data_loc_0); +ex_lw_tail: + /* Get the destination register number into r5 */ + lbui r5, r0, TOPHYS(ex_reg_op); + /* Form load_word jump table offset (lw_table + (8 * regnum)) */ + addik r6, r0, TOPHYS(lw_table); + addk r5, r5, r5; + addk r5, r5, r5; + addk r5, r5, r5; + addk r5, r5, r6; + bra r5; +ex_lw_end: /* Exception handling of load word, ends */ +ex_sw: + /* Get the destination register number into r5 */ + lbui r5, r0, TOPHYS(ex_reg_op); + /* Form store_word jump table offset (sw_table + (8 * regnum)) */ + addik r6, r0, TOPHYS(sw_table); + add r5, r5, r5; + add r5, r5, r5; + add r5, r5, r5; + add r5, r5, r6; + bra r5; +ex_sw_tail: + mfs r6, resr; + nop + andi r6, r6, 0x800; /* Extract ESR[W] */ + beqi r6, ex_shw; + /* Get the word - delay slot */ + swi r4, r0, TOPHYS(ex_tmp_data_loc_0); + /* Store the word, byte-by-byte into destination address */ + lbui r4, r0, TOPHYS(ex_tmp_data_loc_0); + sbi r4, r3, 0; + lbui r4, r0, TOPHYS(ex_tmp_data_loc_1); + sbi r4, r3, 1; + lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); + sbi r4, r3, 2; + lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); + sbi r4, r3, 3; + bri ex_handler_done; + +ex_shw: + /* Store the lower half-word, byte-by-byte into destination address */ + swi r4, r0, TOPHYS(ex_tmp_data_loc_0); + lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); + sbi r4, r3, 0; + lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); + sbi r4, r3, 1; +ex_sw_end: /* Exception handling of store word, ends. */ + +ex_handler_done: +#ifndef CONFIG_MMU + lwi r5, r1, 0 /* RMSR */ + mts rmsr, r5 + nop + lwi r3, r1, PT_R3 + lwi r4, r1, PT_R4 + lwi r5, r1, PT_R5 + lwi r6, r1, PT_R6 + lwi r17, r1, PT_R17 + + rted r17, 0 + addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ +#else + RESTORE_STATE; + rted r17, 0 + nop +#endif + +#ifdef CONFIG_MMU + /* Exception vector entry code. This code runs with address translation + * turned off (i.e. using physical addresses). */ + + /* Exception vectors. 
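
Everything in the unaligned path above goes byte by byte through the ex_tmp_data_loc_* scratch bytes, so no individual access ever crosses its natural alignment; only the final read of the assembled value is a full, aligned word. The load side in C (a sketch):

	static unsigned int emulate_unaligned_lw(const unsigned char *ea)
	{
		union { unsigned char b[4]; unsigned int w; } tmp;

		tmp.b[0] = ea[0];	/* four lbui/sbi round trips ... */
		tmp.b[1] = ea[1];
		tmp.b[2] = ea[2];
		tmp.b[3] = ea[3];
		return tmp.w;		/* ... then one aligned load */
	}

The store side runs the same dance in reverse, and the half-word cases simply stop after two bytes.
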
*/ + + /* 0x10 - Data Storage Exception + * This happens for just a few reasons. U0 set (but we don't do that), + * or zone protection fault (user violation, write to protected page). + * If this is just an update of modified status, we do that quickly + * and exit. Otherwise, we call heavyweight functions to do the work. + */ + handle_data_storage_exception: + /* Working registers already saved: R3, R4, R5, R6 + * R3 = ESR + */ + mfs r11, rpid + nop + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + ori r5, r0, CONFIG_KERNEL_START + cmpu r5, r3, r5 + bgti r5, ex3 + /* First, check if it was a zone fault (which means a user + * tried to access a kernel or read-protected page - always + * a SEGV). All other faults here must be stores, so no + * need to check ESR_S as well. */ + andi r4, r4, ESR_DIZ /* ESR_Z - zone protection */ + bnei r4, ex2 + + ori r4, r0, swapper_pg_dir + mts rpid, r0 /* TLB will have 0 TID */ + nop + bri ex4 + + /* Get the PGD for the current thread. */ + ex3: + /* First, check if it was a zone fault (which means a user + * tried to access a kernel or read-protected page - always + * a SEGV). All other faults here must be stores, so no + * need to check ESR_S as well. */ + andi r4, r4, ESR_DIZ /* ESR_Z */ + bnei r4, ex2 + /* get current task address */ + addi r4 ,CURRENT_TASK, TOPHYS(0); + lwi r4, r4, TASK_THREAD+PGDIR + ex4: + tophys(r4,r4) + /* Create L1 (pgdir/pmd) address */ + bsrli r5, r3, PGDIR_SHIFT - 2 + andi r5, r5, PAGE_SIZE - 4 +/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ + or r4, r4, r5 + lwi r4, r4, 0 /* Get L1 entry */ + andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */ + beqi r5, ex2 /* Bail if no table */ + + tophys(r5,r5) + bsrli r6, r3, PTE_SHIFT /* Compute PTE address */ + andi r6, r6, PAGE_SIZE - 4 + or r5, r5, r6 + lwi r4, r5, 0 /* Get Linux PTE */ + + andi r6, r4, _PAGE_RW /* Is it writeable? */ + beqi r6, ex2 /* Bail if not */ + + /* Update 'changed' */ + ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + swi r4, r5, 0 /* Update Linux page table */ + + /* Most of the Linux PTE is ready to load into the TLB LO. + * We set ZSEL, where only the LS-bit determines user access. + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ +/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */ + andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \ + TLB_ZSEL(1) | TLB_ATTR_MASK + ori r4, r4, _PAGE_HWEXEC /* make it executable */ + + /* find the TLB index that caused the fault. It has to be here*/ + mts rtlbsx, r3 + nop + mfs r5, rtlbx /* DEBUG: TBD */ + nop + mts rtlblo, r4 /* Load TLB LO */ + nop + /* Will sync shadow TLBs */ + + /* Done...restore registers and get out of here. */ + mts rpid, r11 + nop + bri 4 + + RESTORE_STATE; + rted r17, 0 + nop + ex2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. */ + mts rpid, r11 + nop + bri 4 + RESTORE_STATE; + bri page_fault_data_trap + + + /* 0x11 - Instruction Storage Exception + * This is caused by a fetch from non-execute or guarded pages. 
*/ + handle_instruction_storage_exception: + /* Working registers already saved: R3, R4, R5, R6 + * R3 = ESR + */ + + RESTORE_STATE; + bri page_fault_instr_trap + + /* 0x12 - Data TLB Miss Exception + * As the name implies, translation is not in the MMU, so search the + * page tables and fix it. The only purpose of this function is to + * load TLB entries from the page table if they exist. + */ + handle_data_tlb_miss_exception: + /* Working registers already saved: R3, R4, R5, R6 + * R3 = EAR, R4 = ESR + */ + mfs r11, rpid + nop + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. */ + ori r6, r0, CONFIG_KERNEL_START + cmpu r4, r3, r6 + bgti r4, ex5 + ori r4, r0, swapper_pg_dir + mts rpid, r0 /* TLB will have 0 TID */ + nop + bri ex6 + + /* Get the PGD for the current thread. */ + ex5: + /* get current task address */ + addi r4 ,CURRENT_TASK, TOPHYS(0); + lwi r4, r4, TASK_THREAD+PGDIR + ex6: + tophys(r4,r4) + /* Create L1 (pgdir/pmd) address */ + bsrli r5, r3, PGDIR_SHIFT - 2 + andi r5, r5, PAGE_SIZE - 4 +/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ + or r4, r4, r5 + lwi r4, r4, 0 /* Get L1 entry */ + andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */ + beqi r5, ex7 /* Bail if no table */ + + tophys(r5,r5) + bsrli r6, r3, PTE_SHIFT /* Compute PTE address */ + andi r6, r6, PAGE_SIZE - 4 + or r5, r5, r6 + lwi r4, r5, 0 /* Get Linux PTE */ + + andi r6, r4, _PAGE_PRESENT + beqi r6, ex7 + + ori r4, r4, _PAGE_ACCESSED + swi r4, r5, 0 + + /* Most of the Linux PTE is ready to load into the TLB LO. + * We set ZSEL, where only the LS-bit determines user access. + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + brid finish_tlb_load + andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \ + TLB_ZSEL(1) | TLB_ATTR_MASK + ex7: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mts rpid, r11 + nop + bri 4 + RESTORE_STATE; + bri page_fault_data_trap + + /* 0x13 - Instruction TLB Miss Exception + * Nearly the same as above, except we get our information from + * different registers and bailout to a different point. + */ + handle_instruction_tlb_miss_exception: + /* Working registers already saved: R3, R4, R5, R6 + * R3 = ESR + */ + mfs r11, rpid + nop + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + ori r4, r0, CONFIG_KERNEL_START + cmpu r4, r3, r4 + bgti r4, ex8 + ori r4, r0, swapper_pg_dir + mts rpid, r0 /* TLB will have 0 TID */ + nop + bri ex9 + + /* Get the PGD for the current thread. 
*/ + ex8: + /* get current task address */ + addi r4 ,CURRENT_TASK, TOPHYS(0); + lwi r4, r4, TASK_THREAD+PGDIR + ex9: + tophys(r4,r4) + /* Create L1 (pgdir/pmd) address */ + bsrli r5, r3, PGDIR_SHIFT - 2 + andi r5, r5, PAGE_SIZE - 4 +/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ + or r4, r4, r5 + lwi r4, r4, 0 /* Get L1 entry */ + andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */ + beqi r5, ex10 /* Bail if no table */ + + tophys(r5,r5) + bsrli r6, r3, PTE_SHIFT /* Compute PTE address */ + andi r6, r6, PAGE_SIZE - 4 + or r5, r5, r6 + lwi r4, r5, 0 /* Get Linux PTE */ + + andi r6, r4, _PAGE_PRESENT + beqi r6, ex10 + + ori r4, r4, _PAGE_ACCESSED + swi r4, r5, 0 + + /* Most of the Linux PTE is ready to load into the TLB LO. + * We set ZSEL, where only the LS-bit determines user access. + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + brid finish_tlb_load + andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \ + TLB_ZSEL(1) | TLB_ATTR_MASK + ex10: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mts rpid, r11 + nop + bri 4 + RESTORE_STATE; + bri page_fault_instr_trap + +/* Both the instruction and data TLB miss get to this point to load the TLB. + * r3 - EA of fault + * r4 - TLB LO (info from Linux PTE) + * r5, r6 - available to use + * PID - loaded with proper value when we get here + * Upon exit, we reload everything and RFI. + * A common place to load the TLB. + */ +.section .data +.align 4 +.global tlb_skip + tlb_skip: + .long MICROBLAZE_TLB_SKIP + tlb_index: + /* MS: storing last used tlb index */ + .long MICROBLAZE_TLB_SIZE/2 +.previous + finish_tlb_load: + /* MS: load the last used TLB index. */ + lwi r5, r0, TOPHYS(tlb_index) + addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ + +/* MS: FIXME this is potential fault, because this is mask not count */ + andi r5, r5, MICROBLAZE_TLB_SIZE - 1 + ori r6, r0, 1 + cmp r31, r5, r6 + blti r31, ex12 + lwi r5, r0, TOPHYS(tlb_skip) + ex12: + /* MS: save back current TLB index */ + swi r5, r0, TOPHYS(tlb_index) + + ori r4, r4, _PAGE_HWEXEC /* make it executable */ + mts rtlbx, r5 /* MS: save current TLB */ + nop + mts rtlblo, r4 /* MS: save to TLB LO */ + nop + + /* Create EPN. This is the faulting address plus a static + * set of bits. These are size, valid, E, U0, and ensure + * bits 20 and 21 are zero. + */ + andi r3, r3, PAGE_MASK +#ifdef CONFIG_MICROBLAZE_64K_PAGES + ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K) +#elif CONFIG_MICROBLAZE_16K_PAGES + ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K) +#else + ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K) +#endif + mts rtlbhi, r3 /* Load TLB HI */ + nop + + /* Done...restore registers and get out of here. */ + mts rpid, r11 + nop + bri 4 + RESTORE_STATE; + rted r17, 0 + nop + + /* extern void giveup_fpu(struct task_struct *prev) + * + * The MicroBlaze processor may have an FPU, so this should not just + * return: TBD. + */ + .globl giveup_fpu; + .align 4; + giveup_fpu: + bralid r15,0 /* TBD */ + nop + + /* At present, this routine just hangs. 
- extern void abort(void) */ + .globl abort; + .align 4; + abort: + br r0 + + .globl set_context; + .align 4; + set_context: + mts rpid, r5 /* Shadow TLBs are automatically */ + nop + bri 4 /* flushed by changing PID */ + rtsd r15,8 + nop + +#endif +.end _hw_exception_handler + +#ifdef CONFIG_MMU +/* Unaligned data access exception last on a 4k page for MMU. + * When this is called, we are in virtual mode with exceptions enabled + * and registers 1-13,15,17,18 saved. + * + * R3 = ESR + * R4 = EAR + * R7 = pointer to saved registers (struct pt_regs *regs) + * + * This handler perform the access, and returns via ret_from_exc. + */ +.global _unaligned_data_exception +.ent _unaligned_data_exception +_unaligned_data_exception: + andi r8, r3, 0x3E0; /* Mask and extract the register operand */ + bsrli r8, r8, 2; /* r8 >> 2 = register operand * 8 */ + andi r6, r3, 0x400; /* Extract ESR[S] */ + bneid r6, ex_sw_vm; + andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */ +ex_lw_vm: + beqid r6, ex_lhw_vm; +load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ +/* Load a word, byte-by-byte from destination address and save it in tmp space*/ + addik r6, r0, ex_tmp_data_loc_0; + sbi r5, r6, 0; +load2: lbui r5, r4, 1; + sbi r5, r6, 1; +load3: lbui r5, r4, 2; + sbi r5, r6, 2; +load4: lbui r5, r4, 3; + sbi r5, r6, 3; + brid ex_lw_tail_vm; +/* Get the destination register value into r3 - delay slot */ + lwi r3, r6, 0; +ex_lhw_vm: + /* Load a half-word, byte-by-byte from destination address and + * save it in tmp space */ + addik r6, r0, ex_tmp_data_loc_0; + sbi r5, r6, 0; +load5: lbui r5, r4, 1; + sbi r5, r6, 1; + lhui r3, r6, 0; /* Get the destination register value into r3 */ +ex_lw_tail_vm: + /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */ + addik r5, r8, lw_table_vm; + bra r5; +ex_lw_end_vm: /* Exception handling of load word, ends */ + brai ret_from_exc; +ex_sw_vm: +/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */ + addik r5, r8, sw_table_vm; + bra r5; +ex_sw_tail_vm: + addik r5, r0, ex_tmp_data_loc_0; + beqid r6, ex_shw_vm; + swi r3, r5, 0; /* Get the word - delay slot */ + /* Store the word, byte-by-byte into destination address */ + lbui r3, r5, 0; +store1: sbi r3, r4, 0; + lbui r3, r5, 1; +store2: sbi r3, r4, 1; + lbui r3, r5, 2; +store3: sbi r3, r4, 2; + lbui r3, r5, 3; + brid ret_from_exc; +store4: sbi r3, r4, 3; /* Delay slot */ +ex_shw_vm: + /* Store the lower half-word, byte-by-byte into destination address */ +#ifdef __MICROBLAZEEL__ + lbui r3, r5, 0; +store5: sbi r3, r4, 0; + lbui r3, r5, 1; + brid ret_from_exc; +store6: sbi r3, r4, 1; /* Delay slot */ +#else + lbui r3, r5, 2; +store5: sbi r3, r4, 0; + lbui r3, r5, 3; + brid ret_from_exc; +store6: sbi r3, r4, 1; /* Delay slot */ +#endif + +ex_sw_end_vm: /* Exception handling of store word, ends. */ + +/* We have to prevent cases that get/put_user macros get unaligned pointer + * to bad page area. 
We have to find out which origin instruction caused it + * and called fixup for that origin instruction not instruction in unaligned + * handler */ +ex_unaligned_fixup: + ori r5, r7, 0 /* setup pointer to pt_regs */ + lwi r6, r7, PT_PC; /* faulting address is one instruction above */ + addik r6, r6, -4 /* for finding proper fixup */ + swi r6, r7, PT_PC; /* a save back it to PT_PC */ + addik r7, r0, SIGSEGV + /* call bad_page_fault for finding aligned fixup, fixup address is saved + * in PT_PC which is used as return address from exception */ + addik r15, r0, ret_from_exc-8 /* setup return address */ + brid bad_page_fault + nop + +/* We prevent all load/store because it could failed any attempt to access */ +.section __ex_table,"a"; + .word load1,ex_unaligned_fixup; + .word load2,ex_unaligned_fixup; + .word load3,ex_unaligned_fixup; + .word load4,ex_unaligned_fixup; + .word load5,ex_unaligned_fixup; + .word store1,ex_unaligned_fixup; + .word store2,ex_unaligned_fixup; + .word store3,ex_unaligned_fixup; + .word store4,ex_unaligned_fixup; + .word store5,ex_unaligned_fixup; + .word store6,ex_unaligned_fixup; +.previous; +.end _unaligned_data_exception +#endif /* CONFIG_MMU */ + +.global ex_handler_unhandled +ex_handler_unhandled: +/* FIXME add handle function for unhandled exception - dump register */ + bri 0 + +/* + * hw_exception_handler Jump Table + * - Contains code snippets for each register that caused the unalign exception + * - Hence exception handler is NOT self-modifying + * - Separate table for load exceptions and store exceptions. + * - Each table is of size: (8 * 32) = 256 bytes + */ + +.section .text +.align 4 +lw_table: +lw_r0: R3_TO_LWREG (0); +lw_r1: LWREG_NOP; +lw_r2: R3_TO_LWREG (2); +lw_r3: R3_TO_LWREG_V (3); +lw_r4: R3_TO_LWREG_V (4); +lw_r5: R3_TO_LWREG_V (5); +lw_r6: R3_TO_LWREG_V (6); +lw_r7: R3_TO_LWREG (7); +lw_r8: R3_TO_LWREG (8); +lw_r9: R3_TO_LWREG (9); +lw_r10: R3_TO_LWREG (10); +lw_r11: R3_TO_LWREG (11); +lw_r12: R3_TO_LWREG (12); +lw_r13: R3_TO_LWREG (13); +lw_r14: R3_TO_LWREG (14); +lw_r15: R3_TO_LWREG (15); +lw_r16: R3_TO_LWREG (16); +lw_r17: LWREG_NOP; +lw_r18: R3_TO_LWREG (18); +lw_r19: R3_TO_LWREG (19); +lw_r20: R3_TO_LWREG (20); +lw_r21: R3_TO_LWREG (21); +lw_r22: R3_TO_LWREG (22); +lw_r23: R3_TO_LWREG (23); +lw_r24: R3_TO_LWREG (24); +lw_r25: R3_TO_LWREG (25); +lw_r26: R3_TO_LWREG (26); +lw_r27: R3_TO_LWREG (27); +lw_r28: R3_TO_LWREG (28); +lw_r29: R3_TO_LWREG (29); +lw_r30: R3_TO_LWREG (30); +#ifdef CONFIG_MMU +lw_r31: R3_TO_LWREG_V (31); +#else +lw_r31: R3_TO_LWREG (31); +#endif + +sw_table: +sw_r0: SWREG_TO_R3 (0); +sw_r1: SWREG_NOP; +sw_r2: SWREG_TO_R3 (2); +sw_r3: SWREG_TO_R3_V (3); +sw_r4: SWREG_TO_R3_V (4); +sw_r5: SWREG_TO_R3_V (5); +sw_r6: SWREG_TO_R3_V (6); +sw_r7: SWREG_TO_R3 (7); +sw_r8: SWREG_TO_R3 (8); +sw_r9: SWREG_TO_R3 (9); +sw_r10: SWREG_TO_R3 (10); +sw_r11: SWREG_TO_R3 (11); +sw_r12: SWREG_TO_R3 (12); +sw_r13: SWREG_TO_R3 (13); +sw_r14: SWREG_TO_R3 (14); +sw_r15: SWREG_TO_R3 (15); +sw_r16: SWREG_TO_R3 (16); +sw_r17: SWREG_NOP; +sw_r18: SWREG_TO_R3 (18); +sw_r19: SWREG_TO_R3 (19); +sw_r20: SWREG_TO_R3 (20); +sw_r21: SWREG_TO_R3 (21); +sw_r22: SWREG_TO_R3 (22); +sw_r23: SWREG_TO_R3 (23); +sw_r24: SWREG_TO_R3 (24); +sw_r25: SWREG_TO_R3 (25); +sw_r26: SWREG_TO_R3 (26); +sw_r27: SWREG_TO_R3 (27); +sw_r28: SWREG_TO_R3 (28); +sw_r29: SWREG_TO_R3 (29); +sw_r30: SWREG_TO_R3 (30); +#ifdef CONFIG_MMU +sw_r31: SWREG_TO_R3_V (31); +#else +sw_r31: SWREG_TO_R3 (31); +#endif + +#ifdef CONFIG_MMU +lw_table_vm: +lw_r0_vm: R3_TO_LWREG_VM (0); +lw_r1_vm: 
R3_TO_LWREG_VM_V (1); +lw_r2_vm: R3_TO_LWREG_VM_V (2); +lw_r3_vm: R3_TO_LWREG_VM_V (3); +lw_r4_vm: R3_TO_LWREG_VM_V (4); +lw_r5_vm: R3_TO_LWREG_VM_V (5); +lw_r6_vm: R3_TO_LWREG_VM_V (6); +lw_r7_vm: R3_TO_LWREG_VM_V (7); +lw_r8_vm: R3_TO_LWREG_VM_V (8); +lw_r9_vm: R3_TO_LWREG_VM_V (9); +lw_r10_vm: R3_TO_LWREG_VM_V (10); +lw_r11_vm: R3_TO_LWREG_VM_V (11); +lw_r12_vm: R3_TO_LWREG_VM_V (12); +lw_r13_vm: R3_TO_LWREG_VM_V (13); +lw_r14_vm: R3_TO_LWREG_VM_V (14); +lw_r15_vm: R3_TO_LWREG_VM_V (15); +lw_r16_vm: R3_TO_LWREG_VM_V (16); +lw_r17_vm: R3_TO_LWREG_VM_V (17); +lw_r18_vm: R3_TO_LWREG_VM_V (18); +lw_r19_vm: R3_TO_LWREG_VM_V (19); +lw_r20_vm: R3_TO_LWREG_VM_V (20); +lw_r21_vm: R3_TO_LWREG_VM_V (21); +lw_r22_vm: R3_TO_LWREG_VM_V (22); +lw_r23_vm: R3_TO_LWREG_VM_V (23); +lw_r24_vm: R3_TO_LWREG_VM_V (24); +lw_r25_vm: R3_TO_LWREG_VM_V (25); +lw_r26_vm: R3_TO_LWREG_VM_V (26); +lw_r27_vm: R3_TO_LWREG_VM_V (27); +lw_r28_vm: R3_TO_LWREG_VM_V (28); +lw_r29_vm: R3_TO_LWREG_VM_V (29); +lw_r30_vm: R3_TO_LWREG_VM_V (30); +lw_r31_vm: R3_TO_LWREG_VM_V (31); + +sw_table_vm: +sw_r0_vm: SWREG_TO_R3_VM (0); +sw_r1_vm: SWREG_TO_R3_VM_V (1); +sw_r2_vm: SWREG_TO_R3_VM_V (2); +sw_r3_vm: SWREG_TO_R3_VM_V (3); +sw_r4_vm: SWREG_TO_R3_VM_V (4); +sw_r5_vm: SWREG_TO_R3_VM_V (5); +sw_r6_vm: SWREG_TO_R3_VM_V (6); +sw_r7_vm: SWREG_TO_R3_VM_V (7); +sw_r8_vm: SWREG_TO_R3_VM_V (8); +sw_r9_vm: SWREG_TO_R3_VM_V (9); +sw_r10_vm: SWREG_TO_R3_VM_V (10); +sw_r11_vm: SWREG_TO_R3_VM_V (11); +sw_r12_vm: SWREG_TO_R3_VM_V (12); +sw_r13_vm: SWREG_TO_R3_VM_V (13); +sw_r14_vm: SWREG_TO_R3_VM_V (14); +sw_r15_vm: SWREG_TO_R3_VM_V (15); +sw_r16_vm: SWREG_TO_R3_VM_V (16); +sw_r17_vm: SWREG_TO_R3_VM_V (17); +sw_r18_vm: SWREG_TO_R3_VM_V (18); +sw_r19_vm: SWREG_TO_R3_VM_V (19); +sw_r20_vm: SWREG_TO_R3_VM_V (20); +sw_r21_vm: SWREG_TO_R3_VM_V (21); +sw_r22_vm: SWREG_TO_R3_VM_V (22); +sw_r23_vm: SWREG_TO_R3_VM_V (23); +sw_r24_vm: SWREG_TO_R3_VM_V (24); +sw_r25_vm: SWREG_TO_R3_VM_V (25); +sw_r26_vm: SWREG_TO_R3_VM_V (26); +sw_r27_vm: SWREG_TO_R3_VM_V (27); +sw_r28_vm: SWREG_TO_R3_VM_V (28); +sw_r29_vm: SWREG_TO_R3_VM_V (29); +sw_r30_vm: SWREG_TO_R3_VM_V (30); +sw_r31_vm: SWREG_TO_R3_VM_V (31); +#endif /* CONFIG_MMU */ + +/* Temporary data structures used in the handler */ +.section .data +.align 4 +ex_tmp_data_loc_0: + .byte 0 +ex_tmp_data_loc_1: + .byte 0 +ex_tmp_data_loc_2: + .byte 0 +ex_tmp_data_loc_3: + .byte 0 +ex_reg_op: + .byte 0 diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c new file mode 100644 index 000000000..903dad822 --- /dev/null +++ b/arch/microblaze/kernel/irq.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/init.h> +#include <linux/ftrace.h> +#include <linux/kernel.h> +#include <linux/hardirq.h> +#include <linux/interrupt.h> +#include <linux/irqflags.h> +#include <linux/seq_file.h> +#include <linux/kernel_stat.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/of_irq.h> + +static u32 concurrent_irq; + +void __irq_entry do_IRQ(struct pt_regs *regs) +{ + unsigned int irq; + struct pt_regs *old_regs = set_irq_regs(regs); + trace_hardirqs_off(); + + irq_enter(); + irq = xintc_get_irq(); +next_irq: + BUG_ON(!irq); + generic_handle_irq(irq); + + irq = xintc_get_irq(); + if (irq != -1U) { + pr_debug("next irq: %d\n", irq); + ++concurrent_irq; + goto next_irq; + } + + irq_exit(); + set_irq_regs(old_regs); + trace_hardirqs_on(); +} + +void __init init_IRQ(void) +{ + /* process the entire interrupt tree in one go */ + irqchip_init(); +} diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c new file mode 100644 index 000000000..6366f69d1 --- /dev/null +++ b/arch/microblaze/kernel/kgdb.c @@ -0,0 +1,152 @@ +/* + * Microblaze KGDB support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <asm/cacheflush.h> +#include <asm/asm-offsets.h> +#include <asm/kgdb.h> +#include <asm/pvr.h> + +#define GDB_REG 0 +#define GDB_PC 32 +#define GDB_MSR 33 +#define GDB_EAR 34 +#define GDB_ESR 35 +#define GDB_FSR 36 +#define GDB_BTR 37 +#define GDB_PVR 38 +#define GDB_REDR 50 +#define GDB_RPID 51 +#define GDB_RZPR 52 +#define GDB_RTLBX 53 +#define GDB_RTLBSX 54 /* mfs can't read it */ +#define GDB_RTLBLO 55 +#define GDB_RTLBHI 56 + +/* keep pvr separately because it is unchangeble */ +static struct pvr_s pvr; + +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + unsigned int i; + unsigned long *pt_regb = (unsigned long *)regs; + int temp; + + /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ + for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) + gdb_regs[i] = pt_regb[i]; + + /* Branch target register can't be changed */ + __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : ); + gdb_regs[GDB_BTR] = temp; + + /* pvr part - we have 11 pvr regs */ + for (i = 0; i < sizeof(struct pvr_s)/4; i++) + gdb_regs[GDB_PVR + i] = pvr.pvr[i]; + + /* read special registers - can't be changed */ + __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : ); + gdb_regs[GDB_REDR] = temp; + __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : ); + gdb_regs[GDB_RPID] = temp; + __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : ); + gdb_regs[GDB_RZPR] = temp; + __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : ); + gdb_regs[GDB_RTLBX] = temp; + __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : ); + gdb_regs[GDB_RTLBLO] = temp; + __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : ); + gdb_regs[GDB_RTLBHI] = temp; +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + unsigned int i; + unsigned long *pt_regb = (unsigned long *)regs; + + /* pt_regs and gdb_regs have the same 37 values. + * The rest of gdb_regs are unused and can't be changed. + * r0 register value can't be changed too. 
*/ + for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++) + pt_regb[i] = gdb_regs[i]; +} + +asmlinkage void microblaze_kgdb_break(struct pt_regs *regs) +{ + if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) + return; + + /* Jump over the first arch_kgdb_breakpoint which is barrier to + * get kgdb work. The same solution is used for powerpc */ + if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) + regs->pc += BREAK_INSTR_SIZE; +} + +/* untested */ +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + unsigned int i; + unsigned long *pt_regb = (unsigned long *)(p->thread.regs); + + /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ + for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) + gdb_regs[i] = pt_regb[i]; + + /* pvr part - we have 11 pvr regs */ + for (i = 0; i < sizeof(struct pvr_s)/4; i++) + gdb_regs[GDB_PVR + i] = pvr.pvr[i]; +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->pc = ip; +} + +int kgdb_arch_handle_exception(int vector, int signo, int err_code, + char *remcom_in_buffer, char *remcom_out_buffer, + struct pt_regs *regs) +{ + char *ptr; + unsigned long address; + + switch (remcom_in_buffer[0]) { + case 'c': + /* handle the optional parameter */ + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &address)) + regs->pc = address; + + return 0; + } + return -1; /* this means that we do not want to exit from the handler */ +} + +int kgdb_arch_init(void) +{ + get_pvr(&pvr); /* Fill PVR structure */ + return 0; +} + +void kgdb_arch_exit(void) +{ + /* Nothing to do */ +} + +/* + * Global data + */ +struct kgdb_arch arch_kgdb_ops = { +#ifdef __MICROBLAZEEL__ + .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */ +#else + .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */ +#endif +}; diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S new file mode 100644 index 000000000..fed9da5de --- /dev/null +++ b/arch/microblaze/kernel/mcount.S @@ -0,0 +1,165 @@ +/* + * Low-level ftrace handling + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. 
+ */ + +#include <linux/linkage.h> + +#define NOALIGN_ENTRY(name) .globl name; name: + +/* FIXME MS: I think that I don't need to save all regs */ +#define SAVE_REGS \ + addik r1, r1, -120; \ + swi r2, r1, 4; \ + swi r3, r1, 8; \ + swi r4, r1, 12; \ + swi r5, r1, 116; \ + swi r6, r1, 16; \ + swi r7, r1, 20; \ + swi r8, r1, 24; \ + swi r9, r1, 28; \ + swi r10, r1, 32; \ + swi r11, r1, 36; \ + swi r12, r1, 40; \ + swi r13, r1, 44; \ + swi r14, r1, 48; \ + swi r16, r1, 52; \ + swi r17, r1, 56; \ + swi r18, r1, 60; \ + swi r19, r1, 64; \ + swi r20, r1, 68; \ + swi r21, r1, 72; \ + swi r22, r1, 76; \ + swi r23, r1, 80; \ + swi r24, r1, 84; \ + swi r25, r1, 88; \ + swi r26, r1, 92; \ + swi r27, r1, 96; \ + swi r28, r1, 100; \ + swi r29, r1, 104; \ + swi r30, r1, 108; \ + swi r31, r1, 112; + +#define RESTORE_REGS \ + lwi r2, r1, 4; \ + lwi r3, r1, 8; \ + lwi r4, r1, 12; \ + lwi r5, r1, 116; \ + lwi r6, r1, 16; \ + lwi r7, r1, 20; \ + lwi r8, r1, 24; \ + lwi r9, r1, 28; \ + lwi r10, r1, 32; \ + lwi r11, r1, 36; \ + lwi r12, r1, 40; \ + lwi r13, r1, 44; \ + lwi r14, r1, 48; \ + lwi r16, r1, 52; \ + lwi r17, r1, 56; \ + lwi r18, r1, 60; \ + lwi r19, r1, 64; \ + lwi r20, r1, 68; \ + lwi r21, r1, 72; \ + lwi r22, r1, 76; \ + lwi r23, r1, 80; \ + lwi r24, r1, 84; \ + lwi r25, r1, 88; \ + lwi r26, r1, 92; \ + lwi r27, r1, 96; \ + lwi r28, r1, 100; \ + lwi r29, r1, 104; \ + lwi r30, r1, 108; \ + lwi r31, r1, 112; \ + addik r1, r1, 120; + +ENTRY(ftrace_stub) + rtsd r15, 8; + nop; + +ENTRY(_mcount) +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller) + /* MS: It is just barrier which is removed from C code */ + rtsd r15, 8 + nop +#endif /* CONFIG_DYNAMIC_FTRACE */ + SAVE_REGS + swi r15, r1, 0; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifndef CONFIG_DYNAMIC_FTRACE + lwi r5, r0, ftrace_graph_return; + addik r6, r0, ftrace_stub; /* asm implementation */ + cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */ + beqid r5, end_graph_tracer; + nop; + + lwi r6, r0, ftrace_graph_entry; + addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */ + cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */ + beqid r5, end_graph_tracer; + nop; +#else /* CONFIG_DYNAMIC_FTRACE */ +NOALIGN_ENTRY(ftrace_call_graph) + /* MS: jump over graph function - replaced from C code */ + bri end_graph_tracer +#endif /* CONFIG_DYNAMIC_FTRACE */ + addik r5, r1, 120; /* MS: load parent addr */ + addik r6, r15, 0; /* MS: load current function addr */ + bralid r15, prepare_ftrace_return; + nop; + /* MS: graph was taken that's why - can jump over function trace */ + brid end; + nop; +end_graph_tracer: +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifndef CONFIG_DYNAMIC_FTRACE + /* MS: test function trace if is taken or not */ + lwi r20, r0, ftrace_trace_function; + addik r6, r0, ftrace_stub; + cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */ + beqid r5, end; /* MS: not taken -> jump over */ + nop; +#else /* CONFIG_DYNAMIC_FTRACE */ +NOALIGN_ENTRY(ftrace_call) +/* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */ + nop + nop +#endif /* CONFIG_DYNAMIC_FTRACE */ +/* static normal trace */ + lwi r6, r1, 120; /* MS: load parent addr */ + addik r5, r15, -4; /* MS: load current function addr */ + /* MS: here is dependency on previous code */ + brald r15, r20; /* MS: jump to ftrace handler */ + nop; +end: + lwi r15, r1, 0; + RESTORE_REGS + + rtsd r15, 8; /* MS: jump back */ + nop; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(return_to_handler) + nop; /* MS: just barrier for rtsd r15, 8 */ + nop; + 
SAVE_REGS + swi r15, r1, 0; + + /* MS: find out returning address */ + bralid r15, ftrace_return_to_handler; + nop; + + /* MS: return value from ftrace_return_to_handler is my returning addr + * must be before restore regs because I have to restore r3 content */ + addik r15, r3, 0; + RESTORE_REGS + + rtsd r15, 8; /* MS: jump back */ + nop; +#endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c new file mode 100644 index 000000000..9f1d02c4c --- /dev/null +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/export.h> +#include <linux/string.h> +#include <linux/cryptohash.h> +#include <linux/delay.h> +#include <linux/in6.h> +#include <linux/syscalls.h> + +#include <asm/checksum.h> +#include <asm/cacheflush.h> +#include <linux/io.h> +#include <asm/page.h> +#include <linux/ftrace.h> +#include <linux/uaccess.h> + +#ifdef CONFIG_FUNCTION_TRACER +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); +#endif + +/* + * Assembly functions that may be used (directly or indirectly) by modules + */ +EXPORT_SYMBOL(__copy_tofrom_user); +EXPORT_SYMBOL(__strncpy_user); + +#ifdef CONFIG_OPT_LIB_ASM +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memmove); +#endif + +#ifdef CONFIG_MMU +EXPORT_SYMBOL(empty_zero_page); +#endif + +EXPORT_SYMBOL(mbc); + +extern void __divsi3(void); +EXPORT_SYMBOL(__divsi3); +extern void __modsi3(void); +EXPORT_SYMBOL(__modsi3); +extern void __mulsi3(void); +EXPORT_SYMBOL(__mulsi3); +extern void __udivsi3(void); +EXPORT_SYMBOL(__udivsi3); +extern void __umodsi3(void); +EXPORT_SYMBOL(__umodsi3); diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S new file mode 100644 index 000000000..6759af688 --- /dev/null +++ b/arch/microblaze/kernel/misc.S @@ -0,0 +1,65 @@ +/* + * Miscellaneous low-level MMU functions. + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 Xilinx, Inc. All rights reserved. + * + * Derived from arch/ppc/kernel/misc.S + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#include <linux/linkage.h> +#include <linux/sys.h> +#include <asm/unistd.h> +#include <linux/errno.h> +#include <asm/mmu.h> +#include <asm/page.h> + + .text +/* + * Flush MMU TLB + * + * We avoid flushing the pinned 0, 1 and possibly 2 entries. + */ +.globl _tlbia; +.type _tlbia, @function +.align 4; +_tlbia: + lwi r12, r0, tlb_skip; + /* isync */ +_tlbia_1: + mts rtlbx, r12 + nop + mts rtlbhi, r0 /* flush: ensure V is clear */ + nop + rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1 + bneid r11, _tlbia_1 /* loop for all entries */ + addik r12, r12, 1 + /* sync */ + rtsd r15, 8 + nop + .size _tlbia, . - _tlbia + +/* + * Flush MMU TLB for a particular address (in r5) + */ +.globl _tlbie; +.type _tlbie, @function +.align 4; +_tlbie: + mts rtlbsx, r5 /* look up the address in TLB */ + nop + mfs r12, rtlbx /* Retrieve index */ + nop + blti r12, _tlbie_1 /* Check if found */ + mts rtlbhi, r0 /* flush: ensure V is clear */ + nop +_tlbie_1: + rtsd r15, 8 + nop + + .size _tlbie, . 
- _tlbie diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c new file mode 100644 index 000000000..182e6be85 --- /dev/null +++ b/arch/microblaze/kernel/module.c @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/export.h> +#include <linux/moduleloader.h> +#include <linux/kernel.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/string.h> + +#include <asm/pgtable.h> +#include <asm/cacheflush.h> + +int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *module) +{ + + unsigned int i; + Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + unsigned long int *location; + unsigned long int value; +#if __GNUC__ < 4 + unsigned long int old_value; +#endif + + pr_debug("Applying add relocation section %u to %u\n", + relsec, sechdrs[relsec].sh_info); + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { + + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rela[i].r_offset; + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rela[i].r_info); + value = sym->st_value + rela[i].r_addend; + + switch (ELF32_R_TYPE(rela[i].r_info)) { + + /* + * Be careful! mb-gcc / mb-ld splits the relocs between the + * text and the reloc table. In general this means we must + * read the current contents of (*location), add any offset + * then store the result back in + */ + + case R_MICROBLAZE_32: +#if __GNUC__ < 4 + old_value = *location; + *location = value + old_value; + + pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n", + old_value, value); +#else + *location = value; +#endif + break; + + case R_MICROBLAZE_64: +#if __GNUC__ < 4 + /* Split relocs only required/used pre gcc4.1.1 */ + old_value = ((location[0] & 0x0000FFFF) << 16) | + (location[1] & 0x0000FFFF); + value += old_value; +#endif + location[0] = (location[0] & 0xFFFF0000) | + (value >> 16); + location[1] = (location[1] & 0xFFFF0000) | + (value & 0xFFFF); +#if __GNUC__ < 4 + pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n", + old_value, value); +#endif + break; + + case R_MICROBLAZE_64_PCREL: +#if __GNUC__ < 4 + old_value = (location[0] & 0xFFFF) << 16 | + (location[1] & 0xFFFF); + value -= old_value; +#endif + value -= (unsigned long int)(location) + 4; + location[0] = (location[0] & 0xFFFF0000) | + (value >> 16); + location[1] = (location[1] & 0xFFFF0000) | + (value & 0xFFFF); + pr_debug("R_MICROBLAZE_64_PCREL (%08lx)\n", + value); + break; + + case R_MICROBLAZE_32_PCREL_LO: + pr_debug("R_MICROBLAZE_32_PCREL_LO\n"); + break; + + case R_MICROBLAZE_64_NONE: + pr_debug("R_MICROBLAZE_64_NONE\n"); + break; + + case R_MICROBLAZE_NONE: + pr_debug("R_MICROBLAZE_NONE\n"); + break; + + default: + pr_err("module %s: Unknown relocation: %u\n", + module->name, + ELF32_R_TYPE(rela[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} + +int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *module) +{ + flush_dcache(); + return 0; +} diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c new file mode 100644 index 000000000..6527ec22f --- /dev/null +++ b/arch/microblaze/kernel/process.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2008-2009 Michal Simek 
<monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/cpu.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/pm.h> +#include <linux/tick.h> +#include <linux/bitops.h> +#include <linux/ptrace.h> +#include <asm/pgalloc.h> +#include <linux/uaccess.h> /* for USER_DS macros */ +#include <asm/cacheflush.h> + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_INFO); + + pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode); + pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", + regs->r1, regs->r2, regs->r3, regs->r4); + pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n", + regs->r5, regs->r6, regs->r7, regs->r8); + pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n", + regs->r9, regs->r10, regs->r11, regs->r12); + pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n", + regs->r13, regs->r14, regs->r15, regs->r16); + pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n", + regs->r17, regs->r18, regs->r19, regs->r20); + pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n", + regs->r21, regs->r22, regs->r23, regs->r24); + pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n", + regs->r25, regs->r26, regs->r27, regs->r28); + pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n", + regs->r29, regs->r30, regs->r31, regs->pc); + pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n", + regs->msr, regs->ear, regs->esr, regs->fsr); +} + +void (*pm_power_off)(void) = NULL; +EXPORT_SYMBOL(pm_power_off); + +void flush_thread(void) +{ +} + +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p) +{ + struct pt_regs *childregs = task_pt_regs(p); + struct thread_info *ti = task_thread_info(p); + + if (unlikely(p->flags & PF_KTHREAD)) { + /* if we're creating a new kernel thread then just zeroing all + * the registers. That's OK for a brand new thread.*/ + memset(childregs, 0, sizeof(struct pt_regs)); + memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); + ti->cpu_context.r1 = (unsigned long)childregs; + ti->cpu_context.r20 = (unsigned long)usp; /* fn */ + ti->cpu_context.r19 = (unsigned long)arg; + childregs->pt_mode = 1; + local_save_flags(childregs->msr); +#ifdef CONFIG_MMU + ti->cpu_context.msr = childregs->msr & ~MSR_IE; +#endif + ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8; + return 0; + } + *childregs = *current_pt_regs(); + if (usp) + childregs->r1 = usp; + + memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); + ti->cpu_context.r1 = (unsigned long)childregs; +#ifndef CONFIG_MMU + ti->cpu_context.msr = (unsigned long)childregs->msr; +#else + childregs->msr |= MSR_UMS; + + /* we should consider the fact that childregs is a copy of the parent + * regs which were saved immediately after entering the kernel state + * before enabling VM. This MSR will be restored in switch_to and + * RETURN() and we want to have the right machine state there + * specifically this state must have INTs disabled before and enabled + * after performing rtbd + * compose the right MSR for RETURN(). 
It will work for switch_to also + * excepting for VM and UMS + * don't touch UMS , CARRY and cache bits + * right now MSR is a copy of parent one */ + childregs->msr &= ~MSR_EIP; + childregs->msr |= MSR_IE; + childregs->msr &= ~MSR_VM; + childregs->msr |= MSR_VMS; + childregs->msr |= MSR_EE; /* exceptions will be enabled*/ + + ti->cpu_context.msr = (childregs->msr|MSR_VM); + ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ + ti->cpu_context.msr &= ~MSR_IE; +#endif + ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; + + /* + * r21 is the thread reg, r10 is 6th arg to clone + * which contains TLS area + */ + if (clone_flags & CLONE_SETTLS) + childregs->r21 = childregs->r10; + + return 0; +} + +unsigned long get_wchan(struct task_struct *p) +{ +/* TBD (used by procfs) */ + return 0; +} + +/* Set up a thread for executing a new program */ +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) +{ + regs->pc = pc; + regs->r1 = usp; + regs->pt_mode = 0; +#ifdef CONFIG_MMU + regs->msr |= MSR_UMS; + regs->msr &= ~MSR_VM; +#endif +} + +#ifdef CONFIG_MMU +#include <linux/elfcore.h> +/* + * Set up a thread for executing a new program + */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) +{ + return 0; /* MicroBlaze has no separate FPU registers */ +} +#endif /* CONFIG_MMU */ + +void arch_cpu_idle(void) +{ + local_irq_enable(); +} diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c new file mode 100644 index 000000000..c76c93b90 --- /dev/null +++ b/arch/microblaze/kernel/prom.c @@ -0,0 +1,34 @@ +/* + * Procedures for creating, accessing and interpreting the device tree. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/memblock.h> +#include <linux/of_fdt.h> + +void __init early_init_devtree(void *params) +{ + pr_debug(" -> early_init_devtree(%p)\n", params); + + early_init_dt_scan(params); + if (!strlen(boot_command_line)) + strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); + + memblock_allow_resize(); + + pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size()); + + pr_debug(" <- early_init_devtree()\n"); +} diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c new file mode 100644 index 000000000..badd28688 --- /dev/null +++ b/arch/microblaze/kernel/ptrace.c @@ -0,0 +1,170 @@ +/* + * `ptrace' system call + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2004-2007 John Williams <john.williams@petalogix.com> + * + * derived from arch/v850/kernel/ptrace.c + * + * Copyright (C) 2002,03 NEC Electronics Corporation + * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> + * + * Derived from arch/mips/kernel/ptrace.c: + * + * Copyright (C) 1992 Ross Biro + * Copyright (C) Linus Torvalds + * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle + * Copyright (C) 1996 David S. Miller + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 1999 MIPS Technologies, Inc. 
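As a worked instance of the R_MICROBLAZE_64 case in apply_relocate_add() earlier: the toolchain materializes a 32-bit address with an imm/addik instruction pair, and the relocation patches 16 bits into the low half of each word. A standalone sketch with made-up values (the opcode patterns are my reading of the MicroBlaze ISA, shown for illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t location[2] = { 0xb0000000, 0x32800000 }; /* imm 0 ; addik r20, r0, 0 */
	uint32_t value = 0x12345678; /* sym->st_value + r_addend, made up */

	location[0] = (location[0] & 0xFFFF0000) | (value >> 16);    /* imm 0x1234 */
	location[1] = (location[1] & 0xFFFF0000) | (value & 0xFFFF); /* addik ..., 0x5678 */

	printf("%08x %08x\n", location[0], location[1]); /* b0001234 32805678 */
	return 0;
}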
+ * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/ptrace.h> +#include <linux/signal.h> +#include <linux/elf.h> +#include <linux/audit.h> +#include <linux/seccomp.h> +#include <linux/tracehook.h> + +#include <linux/errno.h> +#include <asm/processor.h> +#include <linux/uaccess.h> +#include <asm/asm-offsets.h> +#include <asm/cacheflush.h> +#include <asm/syscall.h> +#include <linux/io.h> + +/* Returns the address where the register at REG_OFFS in P is stashed away. */ +static microblaze_reg_t *reg_save_addr(unsigned reg_offs, + struct task_struct *t) +{ + struct pt_regs *regs; + + /* + * Three basic cases: + * + * (1) A register normally saved before calling the scheduler, is + * available in the kernel entry pt_regs structure at the top + * of the kernel stack. The kernel trap/irq exit path takes + * care to save/restore almost all registers for ptrace'd + * processes. + * + * (2) A call-clobbered register, where the process P entered the + * kernel via [syscall] trap, is not stored anywhere; that's + * OK, because such registers are not expected to be preserved + * when the trap returns anyway (so we don't actually bother to + * test for this case). + * + * (3) A few registers not used at all by the kernel, and so + * normally never saved except by context-switches, are in the + * context switch state. + */ + + /* Register saved during kernel entry (or not available). */ + regs = task_pt_regs(t); + + return (microblaze_reg_t *)((char *)regs + reg_offs); +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int rval; + unsigned long val = 0; + + switch (request) { + /* Read/write the word at location ADDR in the registers. */ + case PTRACE_PEEKUSR: + case PTRACE_POKEUSR: + pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr); + rval = 0; + if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) { + /* + * Special requests that don't actually correspond + * to offsets in struct pt_regs. + */ + if (addr == PT_TEXT_ADDR) { + val = child->mm->start_code; + } else if (addr == PT_DATA_ADDR) { + val = child->mm->start_data; + } else if (addr == PT_TEXT_LEN) { + val = child->mm->end_code + - child->mm->start_code; + } else { + rval = -EIO; + } + } else if (addr < PT_SIZE && (addr & 0x3) == 0) { + microblaze_reg_t *reg_addr = reg_save_addr(addr, child); + if (request == PTRACE_PEEKUSR) + val = *reg_addr; + else { +#if 1 + *reg_addr = data; +#else + /* MS potential problem on WB system + * Be aware that reg_addr is virtual address + * virt_to_phys conversion is necessary. + * This could be sensible solution. + */ + u32 paddr = virt_to_phys((u32)reg_addr); + invalidate_icache_range(paddr, paddr + 4); + *reg_addr = data; + flush_dcache_range(paddr, paddr + 4); +#endif + } + } else + rval = -EIO; + + if (rval == 0 && request == PTRACE_PEEKUSR) + rval = put_user(val, (unsigned long __user *)data); + break; + default: + rval = ptrace_request(child, request, addr, data); + } + return rval; +} + +asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs) +{ + unsigned long ret = 0; + + secure_computing_strict(regs->r12); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + /* + * Tracing decided this syscall should not happen. 
+ * We'll return a bogus call number to get an ENOSYS + * error, but leave the original number in regs->regs[0]. + */ + ret = -1L; + + audit_syscall_entry(regs->r12, regs->r5, regs->r6, regs->r7, regs->r8); + + return ret ?: regs->r12; +} + +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) +{ + int step; + + audit_syscall_exit(regs); + + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, step); +} + +void ptrace_disable(struct task_struct *child) +{ + /* nothing to do */ +} diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c new file mode 100644 index 000000000..fcbe1daf6 --- /dev/null +++ b/arch/microblaze/kernel/reset.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/init.h> +#include <linux/of_platform.h> + +/* Trigger specific functions */ +#ifdef CONFIG_GPIOLIB + +#include <linux/of_gpio.h> + +static int handle; /* reset pin handle */ +static unsigned int reset_val; + +static int of_platform_reset_gpio_probe(void) +{ + int ret; + handle = of_get_named_gpio(of_find_node_by_path("/"), + "hard-reset-gpios", 0); + + if (!gpio_is_valid(handle)) { + pr_info("Skipping unavailable RESET gpio %d (%s)\n", + handle, "reset"); + return -ENODEV; + } + + ret = gpio_request(handle, "reset"); + if (ret < 0) { + pr_info("GPIO pin is already allocated\n"); + return ret; + } + + /* get current setup value */ + reset_val = gpio_get_value(handle); + /* FIXME maybe worth to perform any action */ + pr_debug("Reset: Gpio output state: 0x%x\n", reset_val); + + /* Setup GPIO as output */ + ret = gpio_direction_output(handle, 0); + if (ret < 0) + goto err; + + /* Setup output direction */ + gpio_set_value(handle, 0); + + pr_info("RESET: Registered gpio device: %d, current val: %d\n", + handle, reset_val); + return 0; +err: + gpio_free(handle); + return ret; +} +device_initcall(of_platform_reset_gpio_probe); + + +static void gpio_system_reset(void) +{ + if (gpio_is_valid(handle)) + gpio_set_value(handle, 1 - reset_val); + else + pr_notice("Reset GPIO unavailable - halting!\n"); +} +#else +static void gpio_system_reset(void) +{ + pr_notice("No reset GPIO present - halting!\n"); +} + +void of_platform_reset_gpio_probe(void) +{ + return; +} +#endif + +void machine_restart(char *cmd) +{ + pr_notice("Machine restart...\n"); + gpio_system_reset(); + while (1) + ; +} + +void machine_shutdown(void) +{ + pr_notice("Machine shutdown...\n"); + while (1) + ; +} + +void machine_halt(void) +{ + pr_notice("Machine halt...\n"); + while (1) + ; +} + +void machine_power_off(void) +{ + pr_notice("Machine power off...\n"); + while (1) + ; +} diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c new file mode 100644 index 000000000..bbd6968ce --- /dev/null +++ b/arch/microblaze/kernel/setup.c @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
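In setup.c below, get_romfs_len() recognizes a ROMFS image by the "-rom1fs-" magic in its first 8 bytes and reads the image length as a big-endian 32-bit word at byte offset 8. A user-space restatement of the romfs branch, for illustration (ntohl stands in for be32_to_cpu):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

static unsigned int romfs_len(const unsigned char *p)
{
	uint32_t size;

	if (memcmp(p, "-rom1fs-", 8) != 0)
		return 0;                 /* not a romfs image */
	memcpy(&size, p + 8, 4);          /* bytes 8..11: image size, big-endian */
	return ntohl(size);
}

int main(void)
{
	unsigned char hdr[16] = "-rom1fs-";

	hdr[10] = 0x10;                   /* size bytes 00 00 10 00 = 4096 */
	printf("%u\n", romfs_len(hdr));   /* 4096 */
	return 0;
}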
+ */ + +#include <linux/init.h> +#include <linux/clk-provider.h> +#include <linux/clocksource.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <linux/cpu.h> +#include <linux/initrd.h> +#include <linux/console.h> +#include <linux/debugfs.h> +#include <linux/of_fdt.h> + +#include <asm/setup.h> +#include <asm/sections.h> +#include <asm/page.h> +#include <linux/io.h> +#include <linux/bug.h> +#include <linux/param.h> +#include <linux/pci.h> +#include <linux/cache.h> +#include <linux/of.h> +#include <linux/dma-mapping.h> +#include <asm/cacheflush.h> +#include <asm/entry.h> +#include <asm/cpuinfo.h> + +#include <asm/pgtable.h> + +DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ +DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */ +DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */ +DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ +DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ + +unsigned int boot_cpuid; +/* + * Placed cmd_line to .data section because can be initialized from + * ASM code. Default position is BSS section which is cleared + * in machine_early_init(). + */ +char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data"))); + +void __init setup_arch(char **cmdline_p) +{ + *cmdline_p = boot_command_line; + + setup_memory(); + parse_early_param(); + + console_verbose(); + + unflatten_device_tree(); + + setup_cpuinfo(); + + microblaze_cache_init(); + + xilinx_pci_init(); + +#if defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +} + +#ifdef CONFIG_MTD_UCLINUX +/* Handle both romfs and cramfs types, without generating unnecessary + code (ie no point checking for CRAMFS if it's not even enabled) */ +inline unsigned get_romfs_len(unsigned *addr) +{ +#ifdef CONFIG_ROMFS_FS + if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */ + return be32_to_cpu(addr[2]); +#endif + +#ifdef CONFIG_CRAMFS + if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */ + return le32_to_cpu(addr[1]); +#endif + return 0; +} +#endif /* CONFIG_MTD_UCLINUX_EBSS */ + +unsigned long kernel_tlb; + +void __init machine_early_init(const char *cmdline, unsigned int ram, + unsigned int fdt, unsigned int msr, unsigned int tlb0, + unsigned int tlb1) +{ + unsigned long *src, *dst; + unsigned int offset = 0; + + /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the + * end of kernel. There are two position which we want to check. + * The first is __init_end and the second __bss_start. + */ +#ifdef CONFIG_MTD_UCLINUX + int romfs_size; + unsigned int romfs_base; + char *old_klimit = klimit; + + romfs_base = (ram ? 
ram : (unsigned int)&__init_end); + romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); + if (!romfs_size) { + romfs_base = (unsigned int)&__bss_start; + romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); + } + + /* Move ROMFS out of BSS before clearing it */ + if (romfs_size > 0) { + memmove(&__bss_stop, (int *)romfs_base, romfs_size); + klimit += romfs_size; + } +#endif + +/* clearing bss section */ + memset(__bss_start, 0, __bss_stop-__bss_start); + memset(_ssbss, 0, _esbss-_ssbss); + +/* initialize device tree for usage in early_printk */ + early_init_devtree(_fdt_start); + + /* setup kernel_tlb after BSS cleaning + * Maybe worth to move to asm code */ + kernel_tlb = tlb0 + tlb1; + /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, + tlb1, kernel_tlb); */ + + pr_info("Ramdisk addr 0x%08x, ", ram); + if (fdt) + pr_info("FDT at 0x%08x\n", fdt); + else + pr_info("Compiled-in FDT at %p\n", _fdt_start); + +#ifdef CONFIG_MTD_UCLINUX + pr_info("Found romfs @ 0x%08x (0x%08x)\n", + romfs_base, romfs_size); + pr_info("#### klimit %p ####\n", old_klimit); + BUG_ON(romfs_size < 0); /* What else can we do? */ + + pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", + romfs_size, romfs_base, (unsigned)&__bss_stop); + + pr_info("New klimit: 0x%08x\n", (unsigned)klimit); +#endif + +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR + if (msr) { + pr_info("!!!Your kernel has setup MSR instruction but "); + pr_cont("CPU don't have it %x\n", msr); + } +#else + if (!msr) { + pr_info("!!!Your kernel not setup MSR instruction but "); + pr_cont("CPU have it %x\n", msr); + } +#endif + + /* Do not copy reset vectors. offset = 0x2 means skip the first + * two instructions. dst is pointer to MB vectors which are placed + * in block ram. If you want to copy reset vector setup offset to 0x0 */ +#if !CONFIG_MANUAL_RESET_VECTOR + offset = 0x2; +#endif + dst = (unsigned long *) (offset * sizeof(u32)); + for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) + *dst = *src; + + /* Initialize global data */ + per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ + per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; +} + +void __init time_init(void) +{ + of_clk_init(NULL); + setup_cpuinfo_clk(); + timer_probe(); +} + +#ifdef CONFIG_DEBUG_FS +struct dentry *of_debugfs_root; + +static int microblaze_debugfs_init(void) +{ + of_debugfs_root = debugfs_create_dir("microblaze", NULL); + + return of_debugfs_root == NULL; +} +arch_initcall(microblaze_debugfs_init); + +# ifdef CONFIG_MMU +static int __init debugfs_tlb(void) +{ + struct dentry *d; + + if (!of_debugfs_root) + return -ENODEV; + + d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); + if (!d) + return -ENOMEM; + + return 0; +} +device_initcall(debugfs_tlb); +# endif +#endif diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c new file mode 100644 index 000000000..97001524c --- /dev/null +++ b/arch/microblaze/kernel/signal.c @@ -0,0 +1,325 @@ +/* + * Signal handling + * + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2003,2004 John Williams <jwilliams@itee.uq.edu.au> + * Copyright (C) 2001 NEC Corporation + * Copyright (C) 2001 Miles Bader <miles@gnu.org> + * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 1991,1992 Linus Torvalds + * + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + * + * This file was was derived from the sh version, arch/sh/kernel/signal.c + * + * This file is subject to 
the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/stddef.h> +#include <linux/personality.h> +#include <linux/percpu.h> +#include <linux/linkage.h> +#include <linux/tracehook.h> +#include <asm/entry.h> +#include <asm/ucontext.h> +#include <linux/uaccess.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <linux/syscalls.h> +#include <asm/cacheflush.h> +#include <asm/syscalls.h> + +/* + * Do a signal return; undo the signal stack. + */ +struct sigframe { + struct sigcontext sc; + unsigned long extramask[_NSIG_WORDS-1]; + unsigned long tramp[2]; /* signal trampoline */ +}; + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; + unsigned long tramp[2]; /* signal trampoline */ +}; + +static int restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc, int *rval_p) +{ + unsigned int err = 0; + +#define COPY(x) {err |= __get_user(regs->x, &sc->regs.x); } + COPY(r0); + COPY(r1); + COPY(r2); COPY(r3); COPY(r4); COPY(r5); + COPY(r6); COPY(r7); COPY(r8); COPY(r9); + COPY(r10); COPY(r11); COPY(r12); COPY(r13); + COPY(r14); COPY(r15); COPY(r16); COPY(r17); + COPY(r18); COPY(r19); COPY(r20); COPY(r21); + COPY(r22); COPY(r23); COPY(r24); COPY(r25); + COPY(r26); COPY(r27); COPY(r28); COPY(r29); + COPY(r30); COPY(r31); + COPY(pc); COPY(ear); COPY(esr); COPY(fsr); +#undef COPY + + *rval_p = regs->r3; + + return err; +} + +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe __user *frame = + (struct rt_sigframe __user *)(regs->r1); + + sigset_t set; + int rval; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return rval; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +/* + * Set up a signal frame. + */ + +static int +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + int err = 0; + +#define COPY(x) {err |= __put_user(regs->x, &sc->regs.x); } + COPY(r0); + COPY(r1); + COPY(r2); COPY(r3); COPY(r4); COPY(r5); + COPY(r6); COPY(r7); COPY(r8); COPY(r9); + COPY(r10); COPY(r11); COPY(r12); COPY(r13); + COPY(r14); COPY(r15); COPY(r16); COPY(r17); + COPY(r18); COPY(r19); COPY(r20); COPY(r21); + COPY(r22); COPY(r23); COPY(r24); COPY(r25); + COPY(r26); COPY(r27); COPY(r28); COPY(r29); + COPY(r30); COPY(r31); + COPY(pc); COPY(ear); COPY(esr); COPY(fsr); +#undef COPY + + err |= __put_user(mask, &sc->oldmask); + + return err; +} + +/* + * Determine which stack to use.. 
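In get_sigframe() just below, the expression (sp - frame_size) & -8UL rounds the frame start down to an 8-byte boundary (-8UL is the all-ones mask with the low three bits clear). A quick arithmetic check with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x40010ff7, frame_size = 0x1c8; /* illustrative numbers */
	unsigned long frame = (sp - frame_size) & -8UL;    /* as in get_sigframe() */

	printf("%#lx\n", frame); /* 0x40010e28, 8-byte aligned */
	return 0;
}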
+ */ +static inline void __user * +get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size) +{ + /* Default to using normal stack */ + unsigned long sp = sigsp(regs->r1, ksig); + + return (void __user *)((sp - frame_size) & -8UL); +} + +static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0, sig = ksig->sig; + unsigned long address = 0; +#ifdef CONFIG_MMU + pmd_t *pmdp; + pte_t *ptep; +#endif + + frame = get_sigframe(ksig, regs, sizeof(*frame)); + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + return -EFAULT; + + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->r1); + err |= setup_sigcontext(&frame->uc.uc_mcontext, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + /* minus 8 is offset to cater for "rtsd r15,8" */ + /* addi r12, r0, __NR_sigreturn */ + err |= __put_user(0x31800000 | __NR_rt_sigreturn , + frame->tramp + 0); + /* brki r14, 0x8 */ + err |= __put_user(0xb9cc0008, frame->tramp + 1); + + /* Return from sighandler will jump to the tramp. + Negative 8 offset because return is rtsd r15, 8 */ + regs->r15 = ((unsigned long)frame->tramp)-8; + + address = ((unsigned long)frame->tramp); +#ifdef CONFIG_MMU + pmdp = pmd_offset(pud_offset( + pgd_offset(current->mm, address), + address), address); + + preempt_disable(); + ptep = pte_offset_map(pmdp, address); + if (pte_present(*ptep)) { + address = (unsigned long) page_address(pte_page(*ptep)); + /* MS: I need add offset in page */ + address += ((unsigned long)frame->tramp) & ~PAGE_MASK; + /* MS address is virtual */ + address = __virt_to_phys(address); + invalidate_icache_range(address, address + 8); + flush_dcache_range(address, address + 8); + } + pte_unmap(ptep); + preempt_enable(); +#else + flush_icache_range(address, address + 8); + flush_dcache_range(address, address + 8); +#endif + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->r1 = (unsigned long) frame; + + /* Signal handler args: */ + regs->r5 = sig; /* arg 0: signum */ + regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */ + regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */ + /* Offset to handle microblaze rtid r14, 0 */ + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + +#ifdef DEBUG_SIG + pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n", + current->comm, current->pid, frame, regs->pc); +#endif + + return 0; +} + +/* Handle restarting system calls */ +static inline void +handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) +{ + switch (regs->r3) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + if (!has_handler) + goto do_restart; + regs->r3 = -EINTR; + break; + case -ERESTARTSYS: + if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { + regs->r3 = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: +do_restart: + /* offset of 4 bytes to re-execute trap (brki) instruction */ + regs->pc -= 4; + break; + } +} + +/* + * OK, we're invoking a handler + */ + +static void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + /* Set up the stack frame */ + ret = 
setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void do_signal(struct pt_regs *regs, int in_syscall) +{ + struct ksignal ksig; + +#ifdef DEBUG_SIG + pr_info("do signal: %p %d\n", regs, in_syscall); + pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, + regs->r12, current_thread_info()->flags); +#endif + + if (get_signal(&ksig)) { + /* Whee! Actually deliver the signal. */ + if (in_syscall) + handle_restart(regs, &ksig.ka, 1); + handle_signal(&ksig, regs); + return; + } + + if (in_syscall) + handle_restart(regs, NULL, 0); + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back. + */ + restore_saved_sigmask(); +} + +asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) +{ + if (test_thread_flag(TIF_SIGPENDING)) + do_signal(regs, in_syscall); + + if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) + tracehook_notify_resume(regs); +} diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c new file mode 100644 index 000000000..b4debe283 --- /dev/null +++ b/arch/microblaze/kernel/stacktrace.c @@ -0,0 +1,31 @@ +/* + * Stack trace support for Microblaze. + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <linux/ptrace.h> +#include <asm/unwind.h> + +void save_stack_trace(struct stack_trace *trace) +{ + /* Exclude our helper functions from the trace*/ + trace->skip += 2; + microblaze_unwind(NULL, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + microblaze_unwind(tsk, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c new file mode 100644 index 000000000..ed9f34da1 --- /dev/null +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * + * Copyright (C) 2006 Atmark Techno, Inc. + * Yasushi SHOJI <yashi@atmark-techno.com> + * Tetsuya OHKAWA <tetsuya@atmark-techno.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
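The trampoline constants in setup_rt_frame() above can be cross-checked against the syscall table later in this patch: sys_rt_sigreturn_wrapper is entry 173, and OR-ing that into 0x31800000 yields the first trampoline word; the second word, 0xb9cc0008 ("brki r14, 0x8"), then enters the kernel the same way ordinary system calls do, with the number in r12. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t load_r12 = 0x31800000;   /* "addi r12, r0, 0" per the comment above */
	uint32_t nr_rt_sigreturn = 173;   /* table index of sys_rt_sigreturn_wrapper */

	printf("%08x\n", load_r12 | nr_rt_sigreturn); /* 318000ad */
	return 0;
}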
+ */ + +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/syscalls.h> +#include <linux/sem.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/stat.h> +#include <linux/mman.h> +#include <linux/sys.h> +#include <linux/ipc.h> +#include <linux/file.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/semaphore.h> +#include <linux/uaccess.h> +#include <linux/unistd.h> +#include <linux/slab.h> +#include <asm/syscalls.h> + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + off_t, pgoff) +{ + if (pgoff & ~PAGE_MASK) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); +} + +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, pgoff) +{ + if (pgoff & (~PAGE_MASK >> 12)) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); +} diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S new file mode 100644 index 000000000..6ab650593 --- /dev/null +++ b/arch/microblaze/kernel/syscall_table.S @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +ENTRY(sys_call_table) + .long sys_restart_syscall /* 0 - old "setup()" system call, + * used for restarting */ + .long sys_exit + .long sys_fork + .long sys_read + .long sys_write + .long sys_open /* 5 */ + .long sys_close + .long sys_waitpid + .long sys_creat + .long sys_link + .long sys_unlink /* 10 */ + .long sys_execve + .long sys_chdir + .long sys_time + .long sys_mknod + .long sys_chmod /* 15 */ + .long sys_lchown + .long sys_ni_syscall /* old break syscall holder */ + .long sys_ni_syscall /* old stat */ + .long sys_lseek + .long sys_getpid /* 20 */ + .long sys_mount + .long sys_oldumount + .long sys_setuid + .long sys_getuid + .long sys_stime /* 25 */ + .long sys_ptrace + .long sys_alarm + .long sys_ni_syscall /* oldfstat */ + .long sys_pause + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_access + .long sys_nice + .long sys_ni_syscall /* 35 - old ftime syscall holder */ + .long sys_sync + .long sys_kill + .long sys_rename + .long sys_mkdir + .long sys_rmdir /* 40 */ + .long sys_dup + .long sys_pipe + .long sys_times + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ + .long sys_setgid + .long sys_getgid + .long sys_signal + .long sys_geteuid + .long sys_getegid /* 50 */ + .long sys_acct + .long sys_umount /* recycled never used phys() */ + .long sys_ni_syscall /* old lock syscall holder */ + .long sys_ioctl + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_setpgid + .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall /* olduname */ + .long sys_umask /* 60 */ + .long sys_chroot + .long sys_ustat + .long sys_dup2 + .long sys_getppid + .long sys_getpgrp /* 65 */ + .long sys_setsid + .long sys_ni_syscall /* sys_sigaction */ + .long sys_sgetmask + .long sys_ssetmask + .long sys_setreuid /* 70 */ + .long sys_setregid + .long sys_ni_syscall /* sys_sigsuspend_wrapper */ + .long sys_sigpending + .long sys_sethostname + .long sys_setrlimit /* 75 */ + .long sys_ni_syscall /* old_getrlimit */ + .long sys_getrusage + .long sys_gettimeofday + .long sys_settimeofday + .long sys_getgroups /* 
80 */ + .long sys_setgroups + .long sys_ni_syscall /* old_select */ + .long sys_symlink + .long sys_ni_syscall /* oldlstat */ + .long sys_readlink /* 85 */ + .long sys_uselib + .long sys_swapon + .long sys_reboot + .long sys_ni_syscall /* old_readdir */ + .long sys_mmap /* 90 */ /* old_mmap */ + .long sys_munmap + .long sys_truncate + .long sys_ftruncate + .long sys_fchmod + .long sys_fchown /* 95 */ + .long sys_getpriority + .long sys_setpriority + .long sys_ni_syscall /* old profil syscall holder */ + .long sys_statfs + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm */ + .long sys_socketcall + .long sys_syslog /* operation with system console */ + .long sys_setitimer + .long sys_getitimer /* 105 */ + .long sys_newstat + .long sys_newlstat + .long sys_newfstat + .long sys_ni_syscall /* uname */ + .long sys_ni_syscall /* 110 */ /* iopl */ + .long sys_vhangup + .long sys_ni_syscall /* old "idle" system call */ + .long sys_ni_syscall /* old sys_vm86old */ + .long sys_wait4 + .long sys_swapoff /* 115 */ + .long sys_sysinfo + .long sys_ni_syscall /* old sys_ipc */ + .long sys_fsync + .long sys_ni_syscall /* sys_sigreturn_wrapper */ + .long sys_clone /* 120 */ + .long sys_setdomainname + .long sys_newuname + .long sys_ni_syscall /* modify_ldt */ + .long sys_adjtimex + .long sys_mprotect /* 125: sys_mprotect */ + .long sys_sigprocmask + .long sys_ni_syscall /* old "create_module" */ + .long sys_init_module + .long sys_delete_module + .long sys_ni_syscall /* 130: old "get_kernel_syms" */ + .long sys_quotactl + .long sys_getpgid + .long sys_fchdir + .long sys_bdflush + .long sys_sysfs /* 135 */ + .long sys_personality + .long sys_ni_syscall /* reserved for afs_syscall */ + .long sys_setfsuid + .long sys_setfsgid + .long sys_llseek /* 140 */ + .long sys_getdents + .long sys_select + .long sys_flock + .long sys_msync + .long sys_readv /* 145 */ + .long sys_writev + .long sys_getsid + .long sys_fdatasync + .long sys_sysctl + .long sys_mlock /* 150: sys_mlock */ + .long sys_munlock + .long sys_mlockall + .long sys_munlockall + .long sys_sched_setparam + .long sys_sched_getparam /* 155 */ + .long sys_sched_setscheduler + .long sys_sched_getscheduler + .long sys_sched_yield + .long sys_sched_get_priority_max + .long sys_sched_get_priority_min /* 160 */ + .long sys_sched_rr_get_interval + .long sys_nanosleep + .long sys_mremap + .long sys_setresuid + .long sys_getresuid /* 165 */ + .long sys_ni_syscall /* sys_vm86 */ + .long sys_ni_syscall /* Old sys_query_module */ + .long sys_poll + .long sys_ni_syscall /* old nfsservctl */ + .long sys_setresgid /* 170 */ + .long sys_getresgid + .long sys_prctl + .long sys_rt_sigreturn_wrapper + .long sys_rt_sigaction + .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigpending + .long sys_rt_sigtimedwait + .long sys_rt_sigqueueinfo + .long sys_rt_sigsuspend + .long sys_pread64 /* 180 */ + .long sys_pwrite64 + .long sys_chown + .long sys_getcwd + .long sys_capget + .long sys_capset /* 185 */ + .long sys_ni_syscall /* sigaltstack */ + .long sys_sendfile + .long sys_ni_syscall /* reserved for streams1 */ + .long sys_ni_syscall /* reserved for streams2 */ + .long sys_vfork /* 190 */ + .long sys_getrlimit + .long sys_mmap2 + .long sys_truncate64 + .long sys_ftruncate64 + .long sys_stat64 /* 195 */ + .long sys_lstat64 + .long sys_fstat64 + .long sys_lchown + .long sys_getuid + .long sys_getgid /* 200 */ + .long sys_geteuid + .long sys_getegid + .long sys_setreuid + .long sys_setregid + .long sys_getgroups /* 205 */ + .long sys_setgroups + .long sys_fchown + 
.long sys_setresuid + .long sys_getresuid + .long sys_setresgid /* 210 */ + .long sys_getresgid + .long sys_chown + .long sys_setuid + .long sys_setgid + .long sys_setfsuid /* 215 */ + .long sys_setfsgid + .long sys_pivot_root + .long sys_mincore + .long sys_madvise + .long sys_getdents64 /* 220 */ + .long sys_fcntl64 + .long sys_ni_syscall /* reserved for TUX */ + .long sys_ni_syscall + .long sys_gettid + .long sys_readahead /* 225 */ + .long sys_setxattr + .long sys_lsetxattr + .long sys_fsetxattr + .long sys_getxattr + .long sys_lgetxattr /* 230 */ + .long sys_fgetxattr + .long sys_listxattr + .long sys_llistxattr + .long sys_flistxattr + .long sys_removexattr /* 235 */ + .long sys_lremovexattr + .long sys_fremovexattr + .long sys_tkill + .long sys_sendfile64 + .long sys_futex /* 240 */ + .long sys_sched_setaffinity + .long sys_sched_getaffinity + .long sys_ni_syscall /* set_thread_area */ + .long sys_ni_syscall /* get_thread_area */ + .long sys_io_setup /* 245 */ + .long sys_io_destroy + .long sys_io_getevents + .long sys_io_submit + .long sys_io_cancel + .long sys_fadvise64 /* 250 */ + .long sys_ni_syscall + .long sys_exit_group + .long sys_lookup_dcookie + .long sys_epoll_create + .long sys_epoll_ctl /* 255 */ + .long sys_epoll_wait + .long sys_remap_file_pages + .long sys_set_tid_address + .long sys_timer_create + .long sys_timer_settime /* 260 */ + .long sys_timer_gettime + .long sys_timer_getoverrun + .long sys_timer_delete + .long sys_clock_settime + .long sys_clock_gettime /* 265 */ + .long sys_clock_getres + .long sys_clock_nanosleep + .long sys_statfs64 + .long sys_fstatfs64 + .long sys_tgkill /* 270 */ + .long sys_utimes + .long sys_fadvise64_64 + .long sys_ni_syscall /* sys_vserver */ + .long sys_mbind + .long sys_get_mempolicy + .long sys_set_mempolicy + .long sys_mq_open + .long sys_mq_unlink + .long sys_mq_timedsend + .long sys_mq_timedreceive /* 280 */ + .long sys_mq_notify + .long sys_mq_getsetattr + .long sys_kexec_load + .long sys_waitid + .long sys_ni_syscall /* 285 */ /* available */ + .long sys_add_key + .long sys_request_key + .long sys_keyctl + .long sys_ioprio_set + .long sys_ioprio_get /* 290 */ + .long sys_inotify_init + .long sys_inotify_add_watch + .long sys_inotify_rm_watch + .long sys_ni_syscall /* sys_migrate_pages */ + .long sys_openat /* 295 */ + .long sys_mkdirat + .long sys_mknodat + .long sys_fchownat + .long sys_futimesat + .long sys_fstatat64 /* 300 */ + .long sys_unlinkat + .long sys_renameat + .long sys_linkat + .long sys_symlinkat + .long sys_readlinkat /* 305 */ + .long sys_fchmodat + .long sys_faccessat + .long sys_pselect6 + .long sys_ppoll + .long sys_unshare /* 310 */ + .long sys_set_robust_list + .long sys_get_robust_list + .long sys_splice + .long sys_sync_file_range + .long sys_tee /* 315 */ + .long sys_vmsplice + .long sys_move_pages + .long sys_getcpu + .long sys_epoll_pwait + .long sys_utimensat /* 320 */ + .long sys_signalfd + .long sys_timerfd_create + .long sys_eventfd + .long sys_fallocate + .long sys_semtimedop /* 325 */ + .long sys_timerfd_settime + .long sys_timerfd_gettime + .long sys_semctl + .long sys_semget + .long sys_semop /* 330 */ + .long sys_msgctl + .long sys_msgget + .long sys_msgrcv + .long sys_msgsnd + .long sys_shmat /* 335 */ + .long sys_shmctl + .long sys_shmdt + .long sys_shmget + .long sys_signalfd4 /* new syscall */ + .long sys_eventfd2 /* 340 */ + .long sys_epoll_create1 + .long sys_dup3 + .long sys_pipe2 + .long sys_inotify_init1 + .long sys_socket /* 345 */ + .long sys_socketpair + .long sys_bind + .long 
sys_listen + .long sys_accept + .long sys_connect /* 350 */ + .long sys_getsockname + .long sys_getpeername + .long sys_sendto + .long sys_send + .long sys_recvfrom /* 355 */ + .long sys_recv + .long sys_setsockopt + .long sys_getsockopt + .long sys_shutdown + .long sys_sendmsg /* 360 */ + .long sys_recvmsg + .long sys_accept4 + .long sys_preadv + .long sys_pwritev + .long sys_rt_tgsigqueueinfo /* 365 */ + .long sys_perf_event_open + .long sys_recvmmsg + .long sys_fanotify_init + .long sys_fanotify_mark + .long sys_prlimit64 /* 370 */ + .long sys_name_to_handle_at + .long sys_open_by_handle_at + .long sys_clock_adjtime + .long sys_syncfs + .long sys_setns /* 375 */ + .long sys_sendmmsg + .long sys_process_vm_readv + .long sys_process_vm_writev + .long sys_kcmp + .long sys_finit_module /* 380 */ + .long sys_sched_setattr + .long sys_sched_getattr + .long sys_renameat2 + .long sys_seccomp + .long sys_getrandom /* 385 */ + .long sys_memfd_create + .long sys_bpf + .long sys_execveat + .long sys_userfaultfd + .long sys_membarrier /* 390 */ + .long sys_mlock2 + .long sys_copy_file_range + .long sys_preadv2 + .long sys_pwritev2 + .long sys_pkey_mprotect /* 395 */ + .long sys_pkey_alloc + .long sys_pkey_free + .long sys_statx + .long sys_io_pgetevents + .long sys_rseq diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c new file mode 100644 index 000000000..a6683484b --- /dev/null +++ b/arch/microblaze/kernel/timer.c @@ -0,0 +1,332 @@ +/* + * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2012-2013 Xilinx, Inc. + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
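The table above is indexed by the syscall number that user space loads into r12 before trapping with brki r14, 0x8 (the same sequence the signal trampoline emits), and the result comes back in r3, as the sigreturn and restart code earlier assumes. A hedged inline-asm sketch of invoking entry 20 (sys_getpid); it compiles only for MicroBlaze targets and deliberately ignores errno conventions and any additional clobbers:

/* Minimal sketch of the MicroBlaze syscall convention, as an assumption
 * drawn from the surrounding code rather than a documented ABI statement. */
static inline long mb_syscall0(long nr)
{
	register long r12 __asm__("r12") = nr; /* syscall number */
	register long r3  __asm__("r3");       /* kernel return value */

	__asm__ __volatile__("brki r14, 0x8"
			     : "=r"(r3)
			     : "r"(r12)
			     : "r14", "memory");
	return r3;
}

/* usage: long pid = mb_syscall0(20);  entry 20 above is sys_getpid */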
+ */ + +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/sched_clock.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/timecounter.h> +#include <asm/cpuinfo.h> + +static void __iomem *timer_baseaddr; + +static unsigned int freq_div_hz; +static unsigned int timer_clock_freq; + +#define TCSR0 (0x00) +#define TLR0 (0x04) +#define TCR0 (0x08) +#define TCSR1 (0x10) +#define TLR1 (0x14) +#define TCR1 (0x18) + +#define TCSR_MDT (1<<0) +#define TCSR_UDT (1<<1) +#define TCSR_GENT (1<<2) +#define TCSR_CAPT (1<<3) +#define TCSR_ARHT (1<<4) +#define TCSR_LOAD (1<<5) +#define TCSR_ENIT (1<<6) +#define TCSR_ENT (1<<7) +#define TCSR_TINT (1<<8) +#define TCSR_PWMA (1<<9) +#define TCSR_ENALL (1<<10) + +static unsigned int (*read_fn)(void __iomem *); +static void (*write_fn)(u32, void __iomem *); + +static void timer_write32(u32 val, void __iomem *addr) +{ + iowrite32(val, addr); +} + +static unsigned int timer_read32(void __iomem *addr) +{ + return ioread32(addr); +} + +static void timer_write32_be(u32 val, void __iomem *addr) +{ + iowrite32be(val, addr); +} + +static unsigned int timer_read32_be(void __iomem *addr) +{ + return ioread32be(addr); +} + +static inline void xilinx_timer0_stop(void) +{ + write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT, + timer_baseaddr + TCSR0); +} + +static inline void xilinx_timer0_start_periodic(unsigned long load_val) +{ + if (!load_val) + load_val = 1; + /* loading value to timer reg */ + write_fn(load_val, timer_baseaddr + TLR0); + + /* load the initial value */ + write_fn(TCSR_LOAD, timer_baseaddr + TCSR0); + + /* see timer data sheet for detail + * !ENALL - don't enable 'em all + * !PWMA - disable pwm + * TINT - clear interrupt status + * ENT- enable timer itself + * ENIT - enable interrupt + * !LOAD - clear the bit to let go + * ARHT - auto reload + * !CAPT - no external trigger + * !GENT - no external signal + * UDT - set the timer as down counter + * !MDT0 - generate mode + */ + write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT, + timer_baseaddr + TCSR0); +} + +static inline void xilinx_timer0_start_oneshot(unsigned long load_val) +{ + if (!load_val) + load_val = 1; + /* loading value to timer reg */ + write_fn(load_val, timer_baseaddr + TLR0); + + /* load the initial value */ + write_fn(TCSR_LOAD, timer_baseaddr + TCSR0); + + write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT, + timer_baseaddr + TCSR0); +} + +static int xilinx_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + pr_debug("%s: next event, delta %x\n", __func__, (u32)delta); + xilinx_timer0_start_oneshot(delta); + return 0; +} + +static int xilinx_timer_shutdown(struct clock_event_device *evt) +{ + pr_info("%s\n", __func__); + xilinx_timer0_stop(); + return 0; +} + +static int xilinx_timer_set_periodic(struct clock_event_device *evt) +{ + pr_info("%s\n", __func__); + xilinx_timer0_start_periodic(freq_div_hz); + return 0; +} + +static struct clock_event_device clockevent_xilinx_timer = { + .name = "xilinx_clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .shift = 8, + .rating = 300, + .set_next_event = xilinx_timer_set_next_event, + .set_state_shutdown = xilinx_timer_shutdown, + .set_state_periodic = xilinx_timer_set_periodic, +}; + +static inline void timer_ack(void) +{ + write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0); +} + +static irqreturn_t 
timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = &clockevent_xilinx_timer; + timer_ack(); + evt->event_handler(evt); + return IRQ_HANDLED; +} + +static struct irqaction timer_irqaction = { + .handler = timer_interrupt, + .flags = IRQF_TIMER, + .name = "timer", + .dev_id = &clockevent_xilinx_timer, +}; + +static __init int xilinx_clockevent_init(void) +{ + clockevent_xilinx_timer.mult = + div_sc(timer_clock_freq, NSEC_PER_SEC, + clockevent_xilinx_timer.shift); + clockevent_xilinx_timer.max_delta_ns = + clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer); + clockevent_xilinx_timer.max_delta_ticks = (u32)~0; + clockevent_xilinx_timer.min_delta_ns = + clockevent_delta2ns(1, &clockevent_xilinx_timer); + clockevent_xilinx_timer.min_delta_ticks = 1; + clockevent_xilinx_timer.cpumask = cpumask_of(0); + clockevents_register_device(&clockevent_xilinx_timer); + + return 0; +} + +static u64 xilinx_clock_read(void) +{ + return read_fn(timer_baseaddr + TCR1); +} + +static u64 xilinx_read(struct clocksource *cs) +{ + /* reading actual value of timer 1 */ + return (u64)xilinx_clock_read(); +} + +static struct timecounter xilinx_tc = { + .cc = NULL, +}; + +static u64 xilinx_cc_read(const struct cyclecounter *cc) +{ + return xilinx_read(NULL); +} + +static struct cyclecounter xilinx_cc = { + .read = xilinx_cc_read, + .mask = CLOCKSOURCE_MASK(32), + .shift = 8, +}; + +static int __init init_xilinx_timecounter(void) +{ + xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, + xilinx_cc.shift); + + timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock()); + + return 0; +} + +static struct clocksource clocksource_microblaze = { + .name = "xilinx_clocksource", + .rating = 300, + .read = xilinx_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init xilinx_clocksource_init(void) +{ + int ret; + + ret = clocksource_register_hz(&clocksource_microblaze, + timer_clock_freq); + if (ret) { + pr_err("failed to register clocksource"); + return ret; + } + + /* stop timer1 */ + write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT, + timer_baseaddr + TCSR1); + /* start timer1 - up counting without interrupt */ + write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1); + + /* register timecounter - for ftrace support */ + return init_xilinx_timecounter(); +} + +static int __init xilinx_timer_init(struct device_node *timer) +{ + struct clk *clk; + static int initialized; + u32 irq; + u32 timer_num = 1; + int ret; + + if (initialized) + return -EINVAL; + + initialized = 1; + + timer_baseaddr = of_iomap(timer, 0); + if (!timer_baseaddr) { + pr_err("ERROR: invalid timer base address\n"); + return -ENXIO; + } + + write_fn = timer_write32; + read_fn = timer_read32; + + write_fn(TCSR_MDT, timer_baseaddr + TCSR0); + if (!(read_fn(timer_baseaddr + TCSR0) & TCSR_MDT)) { + write_fn = timer_write32_be; + read_fn = timer_read32_be; + } + + irq = irq_of_parse_and_map(timer, 0); + if (irq <= 0) { + pr_err("Failed to parse and map irq"); + return -EINVAL; + } + + of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num); + if (timer_num) { + pr_err("Please enable two timers in HW\n"); + return -EINVAL; + } + + pr_info("%pOF: irq=%d\n", timer, irq); + + clk = of_clk_get(timer, 0); + if (IS_ERR(clk)) { + pr_err("ERROR: timer CCF input clock not found\n"); + /* If there is clock-frequency property than use it */ + of_property_read_u32(timer, "clock-frequency", + &timer_clock_freq); + } else { + timer_clock_freq = clk_get_rate(clk); + } + + if 
(!timer_clock_freq) { + pr_err("ERROR: Using CPU clock frequency\n"); + timer_clock_freq = cpuinfo.cpu_clock_freq; + } + + freq_div_hz = timer_clock_freq / HZ; + + ret = setup_irq(irq, &timer_irqaction); + if (ret) { + pr_err("Failed to setup IRQ"); + return ret; + } + + ret = xilinx_clocksource_init(); + if (ret) + return ret; + + ret = xilinx_clockevent_init(); + if (ret) + return ret; + + sched_clock_register(xilinx_clock_read, 32, timer_clock_freq); + + return 0; +} + +TIMER_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a", + xilinx_timer_init); diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c new file mode 100644 index 000000000..45bbba9d9 --- /dev/null +++ b/arch/microblaze/kernel/traps.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2007-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/kallsyms.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/debug_locks.h> + +#include <asm/exceptions.h> +#include <asm/unwind.h> + +void trap_init(void) +{ + __enable_hw_exceptions(); +} + +static unsigned long kstack_depth_to_print; /* 0 == entire stack */ + +static int __init kstack_setup(char *s) +{ + return !kstrtoul(s, 0, &kstack_depth_to_print); +} +__setup("kstack=", kstack_setup); + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + unsigned long words_to_show; + u32 fp = (u32) sp; + + if (fp == 0) { + if (task) { + fp = ((struct thread_info *) + (task->stack))->cpu_context.r1; + } else { + /* Pick up caller of dump_stack() */ + fp = (u32)&sp - 8; + } + } + + words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2; + if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print)) + words_to_show = kstack_depth_to_print; + + pr_info("Kernel Stack:\n"); + + /* + * Make the first line an 'odd' size if necessary to get + * remaining lines to start at an address multiple of 0x10 + */ + if (fp & 0xF) { + unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2; + if (line1_words < words_to_show) { + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, + 4, (void *)fp, line1_words << 2, 0); + fp += line1_words << 2; + words_to_show -= line1_words; + } + } + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, + words_to_show << 2, 0); + pr_info("\n\nCall Trace:\n"); + microblaze_unwind(task, NULL); + pr_info("\n"); + + if (!task) + task = current; + + debug_show_held_locks(task); +} diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c new file mode 100644 index 000000000..34c270cb1 --- /dev/null +++ b/arch/microblaze/kernel/unwind.c @@ -0,0 +1,320 @@ +/* + * Backtrace support for Microblaze + * + * Copyright (C) 2010 Digital Design Corporation + * + * Based on arch/sh/kernel/cpu/sh5/unwind.c code which is: + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +/* #define DEBUG 1 */ +#include <linux/export.h> +#include <linux/kallsyms.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/stacktrace.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <asm/sections.h> +#include <asm/exceptions.h> +#include <asm/unwind.h> +#include <asm/switch_to.h> + +struct stack_trace; + +/* + * On Microblaze, finding the previous stack frame is a little tricky. + * At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS, + * and even if it did, gcc (4.1.2) does not store the frame pointer at + * a consistent offset within each frame. To determine frame size, it is + * necessary to search for the assembly instruction that creates or reclaims + * the frame and extract the size from it. + * + * Microblaze stores the stack pointer in r1, and creates a frame via + * + * addik r1, r1, -FRAME_SIZE + * + * The frame is reclaimed via + * + * addik r1, r1, FRAME_SIZE + * + * Frame creation occurs at or near the top of a function. + * Depending on the compiler, reclaim may occur at the end, or before + * a mid-function return. + * + * A stack frame is usually not created in a leaf function. + * + */ + +/** + * get_frame_size - Extract the stack adjustment from an + * "addik r1, r1, adjust" instruction + * @instr : Microblaze instruction + * + * Return - Number of stack bytes the instruction reserves or reclaims + */ +static inline long get_frame_size(unsigned long instr) +{ + return abs((s16)(instr & 0xFFFF)); +} + +/** + * find_frame_creation - Search backward to find the instruction that creates + * the stack frame (hopefully, for the same function the + * initial PC is in). + * @pc : Program counter at which to begin the search + * + * Return - PC at which stack frame creation occurs + * NULL if this cannot be found, i.e. a leaf function + */ +static unsigned long *find_frame_creation(unsigned long *pc) +{ + int i; + + /* NOTE: Distance to search is arbitrary + * 250 works well for most things, + * 750 picks up things like tcp_recvmsg(), + * 1000 needed for fat_fill_super() + */ + for (i = 0; i < 1000; i++, pc--) { + unsigned long instr; + s16 frame_size; + + if (!kernel_text_address((unsigned long) pc)) + return NULL; + + instr = *pc; + + /* addik r1, r1, foo ? */ + if ((instr & 0xFFFF0000) != 0x30210000) + continue; /* No */ + + frame_size = get_frame_size(instr); + if ((frame_size < 8) || (frame_size & 3)) { + pr_debug(" Invalid frame size %d at 0x%p\n", + frame_size, pc); + return NULL; + } + + pr_debug(" Found frame creation at 0x%p, size %d\n", pc, + frame_size); + return pc; + } + + return NULL; +} + +/** + * lookup_prev_stack_frame - Find the stack frame of the previous function. + * @fp : Frame (stack) pointer for current function + * @pc : Program counter within current function + * @leaf_return : r15 value within current function. If the current function + * is a leaf, this is the caller's return address. 
+ * @pprev_fp : On exit, set to frame (stack) pointer for previous function + * @pprev_pc : On exit, set to current function caller's return address + * + * Return - 0 on success, -EINVAL if the previous frame cannot be found + */ +static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, + unsigned long leaf_return, + unsigned long *pprev_fp, + unsigned long *pprev_pc) +{ + unsigned long *prologue = NULL; + + /* _switch_to is a special leaf function */ + if (pc != (unsigned long) &_switch_to) + prologue = find_frame_creation((unsigned long *)pc); + + if (prologue) { + long frame_size = get_frame_size(*prologue); + + *pprev_fp = fp + frame_size; + *pprev_pc = *(unsigned long *)fp; + } else { + if (!leaf_return) + return -EINVAL; + *pprev_pc = leaf_return; + *pprev_fp = fp; + } + + /* NOTE: don't check kernel_text_address here, to allow display + * of userland return address + */ + return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0; +} + +static void microblaze_unwind_inner(struct task_struct *task, + unsigned long pc, unsigned long fp, + unsigned long leaf_return, + struct stack_trace *trace); + +/** + * unwind_trap - Unwind through a system trap, that stored previous state + * on the stack. + */ +#ifdef CONFIG_MMU +static inline void unwind_trap(struct task_struct *task, unsigned long pc, + unsigned long fp, struct stack_trace *trace) +{ + /* To be implemented */ +} +#else +static inline void unwind_trap(struct task_struct *task, unsigned long pc, + unsigned long fp, struct stack_trace *trace) +{ + const struct pt_regs *regs = (const struct pt_regs *) fp; + microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); +} +#endif + +/** + * microblaze_unwind_inner - Unwind the stack from the specified point + * @task : Task whose stack we are to unwind (may be NULL) + * @pc : Program counter from which we start unwinding + * @fp : Frame (stack) pointer from which we start unwinding + * @leaf_return : Value of r15 at pc. If the function is a leaf, this is + * the caller's return address. + * @trace : Where to store stack backtrace (PC values). + * NULL == print backtrace to kernel log + */ +static void microblaze_unwind_inner(struct task_struct *task, + unsigned long pc, unsigned long fp, + unsigned long leaf_return, + struct stack_trace *trace) +{ + int ofs = 0; + + pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp); + if (!pc || !fp || (pc & 3) || (fp & 3)) { + pr_debug(" Invalid state for unwind, aborting\n"); + return; + } + for (; pc != 0;) { + unsigned long next_fp, next_pc = 0; + unsigned long return_to = pc + 2 * sizeof(unsigned long); + const struct trap_handler_info *handler = + µblaze_trap_handlers; + + /* Is previous function the HW exception handler? */ + if ((return_to >= (unsigned long)&_hw_exception_handler) + &&(return_to < (unsigned long)&ex_handler_unhandled)) { + /* + * HW exception handler doesn't save all registers, + * so we open-code a special case of unwind_trap() + */ +#ifndef CONFIG_MMU + const struct pt_regs *regs = + (const struct pt_regs *) fp; +#endif + pr_info("HW EXCEPTION\n"); +#ifndef CONFIG_MMU + microblaze_unwind_inner(task, regs->r17 - 4, + fp + EX_HANDLER_STACK_SIZ, + regs->r15, trace); +#endif + return; + } + + /* Is previous function a trap handler? 
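+ * return_to was computed above as pc + 8: a MicroBlaze call made with
+ * brlid leaves the address of the branch itself in r15, and the
+ * matching "rtsd r15, 8" resumes past the branch and its delay slot.
+ * Each microblaze_trap_handlers entry gives the [start_addr, end_addr]
+ * text range of one trap handler; if return_to lands in such a range,
+ * what sits at fp is (at least on noMMU) a saved struct pt_regs rather
+ * than an ordinary frame, so the walk is handed to unwind_trap()
+ * instead of lookup_prev_stack_frame().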
*/ + for (; handler->start_addr; ++handler) { + if ((return_to >= handler->start_addr) + && (return_to <= handler->end_addr)) { + if (!trace) + pr_info("%s\n", handler->trap_name); + unwind_trap(task, pc, fp, trace); + return; + } + } + pc -= ofs; + + if (trace) { +#ifdef CONFIG_STACKTRACE + if (trace->skip > 0) + trace->skip--; + else + trace->entries[trace->nr_entries++] = pc; + + if (trace->nr_entries >= trace->max_entries) + break; +#endif + } else { + /* Have we reached userland? */ + if (unlikely(pc == task_pt_regs(task)->pc)) { + pr_info("[<%p>] PID %lu [%s]\n", + (void *) pc, + (unsigned long) task->pid, + task->comm); + break; + } else + print_ip_sym(pc); + } + + /* Stop when we reach anything not part of the kernel */ + if (!kernel_text_address(pc)) + break; + + if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp, + &next_pc) == 0) { + ofs = sizeof(unsigned long); + pc = next_pc & ~3; + fp = next_fp; + leaf_return = 0; + } else { + pr_debug(" Failed to find previous stack frame\n"); + break; + } + + pr_debug(" Next PC=%p, next FP=%p\n", + (void *)next_pc, (void *)next_fp); + } +} + +/** + * microblaze_unwind - Stack unwinder for Microblaze (external entry point) + * @task : Task whose stack we are to unwind (NULL == current) + * @trace : Where to store stack backtrace (PC values). + * NULL == print backtrace to kernel log + */ +void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) +{ + if (task) { + if (task == current) { + const struct pt_regs *regs = task_pt_regs(task); + microblaze_unwind_inner(task, regs->pc, regs->r1, + regs->r15, trace); + } else { + struct thread_info *thread_info = + (struct thread_info *)(task->stack); + const struct cpu_context *cpu_context = + &thread_info->cpu_context; + + microblaze_unwind_inner(task, + (unsigned long) &_switch_to, + cpu_context->r1, + cpu_context->r15, trace); + } + } else { + unsigned long pc, fp; + + __asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp)); + + __asm__ __volatile__ ( + "brlid %0, 0f;" + "nop;" + "0:" + : "=r" (pc) + ); + + /* Since we are not a leaf function, use leaf_return = 0 */ + microblaze_unwind_inner(current, pc, fp, 0, trace); + } +} + diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S new file mode 100644 index 000000000..289d0e7f3 --- /dev/null +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +OUTPUT_ARCH(microblaze) +ENTRY(microblaze_start) + +#include <asm/page.h> +#include <asm-generic/vmlinux.lds.h> +#include <asm/thread_info.h> + +#ifdef __MICROBLAZEEL__ +jiffies = jiffies_64; +#else +jiffies = jiffies_64 + 4; +#endif + +SECTIONS { + . = CONFIG_KERNEL_START; + microblaze_start = CONFIG_KERNEL_BASE_ADDR; + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = . ; + _stext = . ; + HEAD_TEXT + TEXT_TEXT + *(.fixup) + EXIT_TEXT + EXIT_CALL + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + . = ALIGN (4) ; + _etext = . ; + } + + . = ALIGN (4) ; + __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) { + _fdt_start = . ; /* place for fdt blob */ + *(__fdt_blob) ; /* Any link-placed DTB */ + . = _fdt_start + 0x8000; /* Pad up to 32kbyte */ + _fdt_end = . ; + } + + . 
= ALIGN(16); + RODATA + EXCEPTION_TABLE(16) + NOTES + + /* + * sdata2 section can go anywhere, but must be word aligned + * and SDA2_BASE must point to the middle of it + */ + .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) { + _ssrw = .; + . = ALIGN(PAGE_SIZE); /* page aligned when MMU used */ + *(.sdata2) + . = ALIGN(8); + _essrw = .; + _ssrw_size = _essrw - _ssrw; + _KERNEL_SDA2_BASE_ = _ssrw + (_ssrw_size / 2); + } + + _sdata = . ; + RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) + _edata = . ; + + /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ + . = ALIGN(8); + .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { + _ssro = .; + *(.sdata) + } + + .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { + _ssbss = .; + *(.sbss) + _esbss = .; + _essro = .; + _ssro_size = _essro - _ssro ; + _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ; + } + + . = ALIGN(PAGE_SIZE); + __init_begin = .; + + INIT_TEXT_SECTION(PAGE_SIZE) + + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + INIT_DATA + } + + . = ALIGN(4); + .init.ivt : AT(ADDR(.init.ivt) - LOAD_OFFSET) { + __ivt_start = .; + *(.init.ivt) + __ivt_end = .; + } + + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { + INIT_SETUP(0) + } + + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET ) { + INIT_CALLS + } + + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { + CON_INITCALL + } + + SECURITY_INIT + + __init_end_before_initramfs = .; + + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { + INIT_RAM_FS + } + + __init_end = .; + + .bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) { + /* page aligned when MMU used */ + __bss_start = . ; + *(.bss*) + *(COMMON) + . = ALIGN (4) ; + __bss_stop = . ; + } + . = ALIGN(PAGE_SIZE); + _end = .; + + DISCARDS +} diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile new file mode 100644 index 000000000..9fe7ab688 --- /dev/null +++ b/arch/microblaze/lib/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile +# + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ashldi3.o = -pg +CFLAGS_REMOVE_ashrdi3.o = -pg +CFLAGS_REMOVE_lshrdi3.o = -pg +endif + +lib-y := memset.o + +ifeq ($(CONFIG_OPT_LIB_ASM),y) +lib-y += fastcopy.o +else +lib-y += memcpy.o memmove.o +endif + +lib-y += uaccess_old.o + +# libgcc-style stuff needed in the kernel +obj-y += ashldi3.o ashrdi3.o cmpdi2.o divsi3.o lshrdi3.o modsi3.o +obj-y += muldi3.o mulsi3.o ucmpdi2.o udivsi3.o umodsi3.o diff --git a/arch/microblaze/lib/ashldi3.c b/arch/microblaze/lib/ashldi3.c new file mode 100644 index 000000000..4d0f9481b --- /dev/null +++ b/arch/microblaze/lib/ashldi3.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +long long __ashldi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.low = 0; + w.s.high = (unsigned int) uu.s.low << -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.low >> bm; + + w.s.low = (unsigned int) uu.s.low << b; + w.s.high = ((unsigned int) uu.s.high << b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/microblaze/lib/ashrdi3.c b/arch/microblaze/lib/ashrdi3.c new file mode 100644 index 000000000..268098a96 --- /dev/null +++ b/arch/microblaze/lib/ashrdi3.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +long long __ashrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm 
= 32 - b; + + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = + uu.s.high >> 31; + w.s.low = uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/microblaze/lib/cmpdi2.c b/arch/microblaze/lib/cmpdi2.c new file mode 100644 index 000000000..b3f896385 --- /dev/null +++ b/arch/microblaze/lib/cmpdi2.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +word_type __cmpdi2(long long a, long long b) +{ + const DWunion au = { + .ll = a + }; + const DWunion bu = { + .ll = b + }; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + + return 1; +} +EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/microblaze/lib/divsi3.S b/arch/microblaze/lib/divsi3.S new file mode 100644 index 000000000..919fb69f8 --- /dev/null +++ b/arch/microblaze/lib/divsi3.S @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + +/* +* Divide operation for 32 bit integers. +* Input : Dividend in Reg r5 +* Divisor in Reg r6 +* Output: Result in Reg r3 +*/ + .text + .globl __divsi3 + .type __divsi3, @function + .ent __divsi3 +__divsi3: + .frame r1, 0, r15 + + addik r1, r1, -16 + swi r28, r1, 0 + swi r29, r1, 4 + swi r30, r1, 8 + swi r31, r1, 12 + + beqi r6, div_by_zero /* div_by_zero - division error */ + beqi r5, result_is_zero /* result is zero */ + bgeid r5, r5_pos + xor r28, r5, r6 /* get the sign of the result */ + rsubi r5, r5, 0 /* make r5 positive */ +r5_pos: + bgei r6, r6_pos + rsubi r6, r6, 0 /* make r6 positive */ +r6_pos: + addik r30, r0, 0 /* clear mod */ + addik r3, r0, 0 /* clear div */ + addik r29, r0, 32 /* initialize the loop count */ + + /* first part try to find the first '1' in the r5 */ +div0: + blti r5, div2 /* this traps r5 == 0x80000000 */ +div1: + add r5, r5, r5 /* left shift logical r5 */ + bgtid r5, div1 + addik r29, r29, -1 +div2: + /* left shift logical r5 get the '1' into the carry */ + add r5, r5, r5 + addc r30, r30, r30 /* move that bit into the mod register */ + rsub r31, r6, r30 /* try to subtract (r30 a r6) */ + blti r31, mod_too_small + /* move the r31 to mod since the result was positive */ + or r30, r0, r31 + addik r3, r3, 1 +mod_too_small: + addik r29, r29, -1 + beqi r29, loop_end + add r3, r3, r3 /* shift in the '1' into div */ + bri div2 /* div2 */ +loop_end: + bgei r28, return_here + brid return_here + rsubi r3, r3, 0 /* negate the result */ +div_by_zero: +result_is_zero: + or r3, r0, r0 /* set result to 0 */ +return_here: +/* restore values of csrs and that of r3 and the divisor and the dividend */ + lwi r28, r1, 0 + lwi r29, r1, 4 + lwi r30, r1, 8 + lwi r31, r1, 12 + rtsd r15, 8 + addik r1, r1, 16 + +.size __divsi3, . - __divsi3 +.end __divsi3 diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S new file mode 100644 index 000000000..fdc48bb06 --- /dev/null +++ b/arch/microblaze/lib/fastcopy.S @@ -0,0 +1,666 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2008 Jim Law - Iris LP All rights reserved. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. 
See the file COPYING in the main directory of this + * archive for more details. + * + * Written by Jim Law <jlaw@irispower.com> + * + * intended to replace: + * memcpy in memcpy.c and + * memmove in memmove.c + * ... in arch/microblaze/lib + * + * + * assly_fastcopy.S + * + * Attempt at quicker memcpy and memmove for MicroBlaze + * Input : Operand1 in Reg r5 - destination address + * Operand2 in Reg r6 - source address + * Operand3 in Reg r7 - number of bytes to transfer + * Output: Result in Reg r3 - starting destinaition address + * + * + * Explanation: + * Perform (possibly unaligned) copy of a block of memory + * between mem locations with size of xfer spec'd in bytes + */ + +#include <linux/linkage.h> + .text + .globl memcpy + .type memcpy, @function + .ent memcpy + +memcpy: +fast_memcpy_ascending: + /* move d to return register as value of function */ + addi r3, r5, 0 + + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4, a_xfer_end /* if n < 0, less than one word to transfer */ + + /* transfer first 0~3 bytes to get aligned dest address */ + andi r4, r5, 3 /* n = d & 3 */ + /* if zero, destination already aligned */ + beqi r4, a_dalign_done + /* n = 4 - n (yields 3, 2, 1 transfers for 1, 2, 3 addr offset) */ + rsubi r4, r4, 4 + rsub r7, r4, r7 /* c = c - n adjust c */ + +a_xfer_first_loop: + /* if no bytes left to transfer, transfer the bulk */ + beqi r4, a_dalign_done + lbui r11, r6, 0 /* h = *s */ + sbi r11, r5, 0 /* *d = h */ + addi r6, r6, 1 /* s++ */ + addi r5, r5, 1 /* d++ */ + brid a_xfer_first_loop /* loop */ + addi r4, r4, -1 /* n-- (IN DELAY SLOT) */ + +a_dalign_done: + addi r4, r0, 32 /* n = 32 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + /* if n < 0, less than one block to transfer */ + blti r4, a_block_done + +a_block_xfer: + andi r4, r7, 0xffffffe0 /* n = c & ~31 */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, a_block_unaligned + +a_block_aligned: + lwi r9, r6, 0 /* t1 = *(s + 0) */ + lwi r10, r6, 4 /* t2 = *(s + 4) */ + lwi r11, r6, 8 /* t3 = *(s + 8) */ + lwi r12, r6, 12 /* t4 = *(s + 12) */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + swi r10, r5, 4 /* *(d + 4) = t2 */ + swi r11, r5, 8 /* *(d + 8) = t3 */ + swi r12, r5, 12 /* *(d + 12) = t4 */ + lwi r9, r6, 16 /* t1 = *(s + 16) */ + lwi r10, r6, 20 /* t2 = *(s + 20) */ + lwi r11, r6, 24 /* t3 = *(s + 24) */ + lwi r12, r6, 28 /* t4 = *(s + 28) */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + swi r10, r5, 20 /* *(d + 20) = t2 */ + swi r11, r5, 24 /* *(d + 24) = t3 */ + swi r12, r5, 28 /* *(d + 28) = t4 */ + addi r6, r6, 32 /* s = s + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_block_aligned /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + add r6, r6, r4 /* s = s + n */ + lwi r11, r8, 0 /* h = *(as + 0) */ + + addi r9, r9, -1 + beqi r9, a_block_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9, a_block_u2 /* t1 was 2 => 2 byte offset */ + +a_block_u3: + bslli r11, r11, 24 /* h = h << 24 */ +a_bu3_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 
12 /* v = *(as + 12) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu3_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_u1: + bslli r11, r11, 8 /* h = h << 8 */ +a_bu1_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu1_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_u2: + bslli r11, r11, 16 /* h = h << 16 */ +a_bu2_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bsrli r9, r12, 16 /* t1 = v >> 
16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu2_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + +a_block_done: + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4, a_xfer_end /* if n < 0, less than one word to transfer */ + +a_word_xfer: + andi r4, r7, 0xfffffffc /* n = c & ~3 */ + addi r10, r0, 0 /* offset = 0 */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, a_word_unaligned + +a_word_aligned: + lw r9, r6, r10 /* t1 = *(s+offset) */ + sw r9, r5, r10 /* *(d+offset) = t1 */ + addi r4, r4,-4 /* n-- */ + bneid r4, a_word_aligned /* loop */ + addi r10, r10, 4 /* offset++ (IN DELAY SLOT) */ + + bri a_word_done + +a_word_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + lwi r11, r8, 0 /* h = *(as + 0) */ + addi r8, r8, 4 /* as = as + 4 */ + + addi r9, r9, -1 + beqi r9, a_word_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9, a_word_u2 /* t1 was 2 => 2 byte offset */ + +a_word_u3: + bslli r11, r11, 24 /* h = h << 24 */ +a_wu3_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu3_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + + bri a_word_done + +a_word_u1: + bslli r11, r11, 8 /* h = h << 8 */ +a_wu1_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu1_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + + bri a_word_done + +a_word_u2: + bslli r11, r11, 16 /* h = h << 16 */ +a_wu2_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu2_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + +a_word_done: + add r5, r5, r10 /* d = d + offset */ + add r6, r6, r10 /* s = s + offset */ + rsub r7, r10, r7 /* c = c - offset */ + +a_xfer_end: +a_xfer_end_loop: + beqi r7, a_done /* while (c) */ 
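+ /*
+ * The tail loop below moves the 0..3 leftover bytes one at a time.
+ * A rough C sketch of its effect (the real code also folds the d++
+ * increment into the branch delay slot):
+ *
+ *	while (c--)
+ *		*d++ = *s++;
+ */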
+ lbui r9, r6, 0 /* t1 = *s */ + addi r6, r6, 1 /* s++ */ + sbi r9, r5, 0 /* *d = t1 */ + addi r7, r7, -1 /* c-- */ + brid a_xfer_end_loop /* loop */ + addi r5, r5, 1 /* d++ (IN DELAY SLOT) */ + +a_done: + rtsd r15, 8 + nop + +.size memcpy, . - memcpy +.end memcpy +/*----------------------------------------------------------------------------*/ + .globl memmove + .type memmove, @function + .ent memmove + +memmove: + cmpu r4, r5, r6 /* n = s - d */ + bgei r4,fast_memcpy_ascending + +fast_memcpy_descending: + /* move d to return register as value of function */ + addi r3, r5, 0 + + add r5, r5, r7 /* d = d + c */ + add r6, r6, r7 /* s = s + c */ + + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4,d_xfer_end /* if n < 0, less than one word to transfer */ + + /* transfer first 0~3 bytes to get aligned dest address */ + andi r4, r5, 3 /* n = d & 3 */ + /* if zero, destination already aligned */ + beqi r4,d_dalign_done + rsub r7, r4, r7 /* c = c - n adjust c */ + +d_xfer_first_loop: + /* if no bytes left to transfer, transfer the bulk */ + beqi r4,d_dalign_done + addi r6, r6, -1 /* s-- */ + addi r5, r5, -1 /* d-- */ + lbui r11, r6, 0 /* h = *s */ + sbi r11, r5, 0 /* *d = h */ + brid d_xfer_first_loop /* loop */ + addi r4, r4, -1 /* n-- (IN DELAY SLOT) */ + +d_dalign_done: + addi r4, r0, 32 /* n = 32 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + /* if n < 0, less than one block to transfer */ + blti r4, d_block_done + +d_block_xfer: + andi r4, r7, 0xffffffe0 /* n = c & ~31 */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, d_block_unaligned + +d_block_aligned: + addi r6, r6, -32 /* s = s - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r9, r6, 28 /* t1 = *(s + 28) */ + lwi r10, r6, 24 /* t2 = *(s + 24) */ + lwi r11, r6, 20 /* t3 = *(s + 20) */ + lwi r12, r6, 16 /* t4 = *(s + 16) */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + swi r10, r5, 24 /* *(d + 24) = t2 */ + swi r11, r5, 20 /* *(d + 20) = t3 */ + swi r12, r5, 16 /* *(d + 16) = t4 */ + lwi r9, r6, 12 /* t1 = *(s + 12) */ + lwi r10, r6, 8 /* t2 = *(s + 8) */ + lwi r11, r6, 4 /* t3 = *(s + 4) */ + lwi r12, r6, 0 /* t4 = *(s + 0) */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + swi r10, r5, 8 /* *(d + 8) = t2 */ + swi r11, r5, 4 /* *(d + 4) = t3 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_block_aligned /* while (n) loop */ + swi r12, r5, 0 /* *(d + 0) = t4 (IN DELAY SLOT) */ + bri d_block_done + +d_block_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + rsub r6, r4, r6 /* s = s - n */ + lwi r11, r8, 0 /* h = *(as + 0) */ + + addi r9, r9, -1 + beqi r9,d_block_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9,d_block_u2 /* t1 was 2 => 2 byte offset */ + +d_block_u3: + bsrli r11, r11, 8 /* h = h >> 8 */ +d_bu3_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 24 /* t1 = v << 
24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu3_loop /* while (n) loop */ + bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */ + bri d_block_done + +d_block_u1: + bsrli r11, r11, 24 /* h = h >> 24 */ +d_bu1_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu1_loop /* while (n) loop */ + bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */ + bri d_block_done + +d_block_u2: + bsrli r11, r11, 16 /* h = h >> 16 */ +d_bu2_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, 
r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu2_loop /* while (n) loop */ + bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */ + +d_block_done: + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4,d_xfer_end /* if n < 0, less than one word to transfer */ + +d_word_xfer: + andi r4, r7, 0xfffffffc /* n = c & ~3 */ + rsub r5, r4, r5 /* d = d - n */ + rsub r6, r4, r6 /* s = s - n */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, d_word_unaligned + +d_word_aligned: + addi r4, r4,-4 /* n-- */ + lw r9, r6, r4 /* t1 = *(s+n) */ + bneid r4, d_word_aligned /* loop */ + sw r9, r5, r4 /* *(d+n) = t1 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + lw r11, r8, r4 /* h = *(as + n) */ + + addi r9, r9, -1 + beqi r9,d_word_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9,d_word_u2 /* t1 was 2 => 2 byte offset */ + +d_word_u3: + bsrli r11, r11, 8 /* h = h >> 8 */ +d_wu3_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu3_loop /* while (n) loop */ + bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_u1: + bsrli r11, r11, 24 /* h = h >> 24 */ +d_wu1_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu1_loop /* while (n) loop */ + bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_u2: + bsrli r11, r11, 16 /* h = h >> 16 */ +d_wu2_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu2_loop /* while (n) loop */ + bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */ + +d_word_done: + +d_xfer_end: +d_xfer_end_loop: + beqi r7, a_done /* while (c) */ + addi r6, r6, -1 /* s-- */ + lbui r9, r6, 0 /* t1 = *s */ + addi r5, r5, -1 /* d-- */ + sbi r9, r5, 0 /* *d = t1 */ + brid d_xfer_end_loop /* loop */ + addi r7, r7, -1 /* c-- (IN DELAY SLOT) */ + +d_done: + rtsd r15, 8 + nop + +.size memmove, . 
- memmove +.end memmove diff --git a/arch/microblaze/lib/libgcc.h b/arch/microblaze/lib/libgcc.h new file mode 100644 index 000000000..a909366c2 --- /dev/null +++ b/arch/microblaze/lib/libgcc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include <asm/byteorder.h> + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. +#endif + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +extern long long __ashldi3(long long u, word_type b); +extern long long __ashrdi3(long long u, word_type b); +extern word_type __cmpdi2(long long a, long long b); +extern long long __lshrdi3(long long u, word_type b); +extern long long __muldi3(long long u, long long v); +extern word_type __ucmpdi2(unsigned long long a, unsigned long long b); + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/microblaze/lib/lshrdi3.c b/arch/microblaze/lib/lshrdi3.c new file mode 100644 index 000000000..00eb82682 --- /dev/null +++ b/arch/microblaze/lib/lshrdi3.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +long long __lshrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.high = 0; + w.s.low = (unsigned int) uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = (unsigned int) uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c new file mode 100644 index 000000000..f536e81b8 --- /dev/null +++ b/arch/microblaze/lib/memcpy.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memcpy on Microblaze + * This is generic C code to do efficient, alignment-aware memcpy. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccessfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. + * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. + */ + +#include <linux/export.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/compiler.h> + +#include <linux/string.h> + +#ifdef __HAVE_ARCH_MEMCPY +#ifndef CONFIG_OPT_LIB_FUNCTION +void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + + /* Simple, byte oriented memcpy. 
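+ * This is the CONFIG_OPT_LIB_FUNCTION=n fallback: the smallest
+ * possible code, one byte per iteration, with no alignment tricks.
+ * A call such as memcpy(dst, src, 7) issues seven load/store byte
+ * pairs; the optimized variant below instead word-aligns the
+ * destination and moves 32 bits per iteration.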
*/ + while (c--) + *dst++ = *src++; + + return v_dst; +} +#else /* CONFIG_OPT_LIB_FUNCTION */ +void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + + /* The following code tries to optimize the copy by using unsigned + * alignment. This will work fine if both source and destination are + * aligned on the same boundary. However, if they are aligned on + * different boundaries shifts will be necessary. This might result in + * bad performance on MicroBlaze systems without a barrel shifter. + */ + const uint32_t *i_src; + uint32_t *i_dst; + + if (likely(c >= 4)) { + unsigned value, buf_hold; + + /* Align the destination to a word boundary. */ + /* This is done in an endian independent manner. */ + switch ((unsigned long)dst & 3) { + case 1: + *dst++ = *src++; + --c; + case 2: + *dst++ = *src++; + --c; + case 3: + *dst++ = *src++; + --c; + } + + i_dst = (void *)dst; + + /* Choose a copy scheme based on the source */ + /* alignment relative to destination. */ + switch ((unsigned long)src & 3) { + case 0x0: /* Both byte offsets are aligned */ + i_src = (const void *)src; + + for (; c >= 4; c -= 4) + *i_dst++ = *i_src++; + + src = (const void *)i_src; + break; + case 0x1: /* Unaligned - Off by 1 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *i_src++ << 8; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 24; + buf_hold = value << 8; + } +#else + /* Load the holding buffer */ + buf_hold = (*i_src++ & 0xFFFFFF00) >> 8; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | ((value & 0xFF) << 24); + buf_hold = (value & 0xFFFFFF00) >> 8; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src -= 3; + break; + case 0x2: /* Unaligned - Off by 2 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *i_src++ << 16; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 16; + buf_hold = value << 16; + } +#else + /* Load the holding buffer */ + buf_hold = (*i_src++ & 0xFFFF0000) >> 16; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | ((value & 0xFFFF) << 16); + buf_hold = (value & 0xFFFF0000) >> 16; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src -= 2; + break; + case 0x3: /* Unaligned - Off by 3 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *i_src++ << 24; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 8; + buf_hold = value << 24; + } +#else + /* Load the holding buffer */ + buf_hold = (*i_src++ & 0xFF000000) >> 24; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | ((value & 0xFFFFFF) << 8); + buf_hold = (value & 0xFF000000) >> 24; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src -= 1; + break; + } + dst = (void *)i_dst; + } + + /* Finish off any remaining bytes */ + /* simple fast copy, ... 
unless a cache boundary is crossed */ + switch (c) { + case 3: + *dst++ = *src++; + case 2: + *dst++ = *src++; + case 1: + *dst++ = *src++; + } + + return v_dst; +} +#endif /* CONFIG_OPT_LIB_FUNCTION */ +EXPORT_SYMBOL(memcpy); +#endif /* __HAVE_ARCH_MEMCPY */ diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c new file mode 100644 index 000000000..3611ce704 --- /dev/null +++ b/arch/microblaze/lib/memmove.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memcpy on Microblaze + * This is generic C code to do efficient, alignment-aware memmove. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccessfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. + * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. + */ + +#include <linux/export.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/compiler.h> +#include <linux/string.h> + +#ifdef __HAVE_ARCH_MEMMOVE +#ifndef CONFIG_OPT_LIB_FUNCTION +void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + + if (!c) + return v_dst; + + /* Use memcpy when source is higher than dest */ + if (v_dst <= v_src) + return memcpy(v_dst, v_src, c); + + /* copy backwards, from end to beginning */ + src += c; + dst += c; + + /* Simple, byte oriented memmove. */ + while (c--) + *--dst = *--src; + + return v_dst; +} +#else /* CONFIG_OPT_LIB_FUNCTION */ +void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + const uint32_t *i_src; + uint32_t *i_dst; + + if (!c) + return v_dst; + + /* Use memcpy when source is higher than dest */ + if (v_dst <= v_src) + return memcpy(v_dst, v_src, c); + + /* The following code tries to optimize the copy by using unsigned + * alignment. This will work fine if both source and destination are + * aligned on the same boundary. However, if they are aligned on + * different boundaries shifts will be necessary. This might result in + * bad performance on MicroBlaze systems without a barrel shifter. + */ + /* FIXME this part needs more test */ + /* Do a descending copy - this is a bit trickier! */ + dst += c; + src += c; + + if (c >= 4) { + unsigned value, buf_hold; + + /* Align the destination to a word boundary. */ + /* This is done in an endian independent manner. */ + + switch ((unsigned long)dst & 3) { + case 3: + *--dst = *--src; + --c; + case 2: + *--dst = *--src; + --c; + case 1: + *--dst = *--src; + --c; + } + + i_dst = (void *)dst; + /* Choose a copy scheme based on the source */ + /* alignment relative to dstination. 
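+ * The misaligned cases below use a software funnel shift: buf_hold
+ * keeps the bytes of the previously loaded source word that still
+ * belong in the next destination word, so each destination word costs
+ * one aligned 32-bit load instead of four byte loads.  A rough
+ * big-endian sketch for a source k (1..3) bytes past word alignment
+ * (illustrative names, not the code's exact expressions -- note the
+ * FIXME above):
+ *
+ *	buf_hold = *--i_src;
+ *	while (c >= 4) {
+ *		value    = *--i_src;
+ *		*--i_dst = value << (8 * k) | buf_hold >> (32 - 8 * k);
+ *		buf_hold = value;
+ *		c -= 4;
+ *	}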
*/ + switch ((unsigned long)src & 3) { + case 0x0: /* Both byte offsets are aligned */ + + i_src = (const void *)src; + + for (; c >= 4; c -= 4) + *--i_dst = *--i_src; + + src = (const void *)i_src; + break; + case 0x1: /* Unaligned - Off by 1 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *--i_src >> 24; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 8 | value; + buf_hold = value >> 24; + } +#else + /* Load the holding buffer */ + buf_hold = (*--i_src & 0xFF) << 24; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold | + ((value & 0xFFFFFF00) >> 8); + buf_hold = (value & 0xFF) << 24; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src += 1; + break; + case 0x2: /* Unaligned - Off by 2 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *--i_src >> 16; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 16 | value; + buf_hold = value >> 16; + } +#else + /* Load the holding buffer */ + buf_hold = (*--i_src & 0xFFFF) << 16; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold | + ((value & 0xFFFF0000) >> 16); + buf_hold = (value & 0xFFFF) << 16; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src += 2; + break; + case 0x3: /* Unaligned - Off by 3 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); +#ifndef __MICROBLAZEEL__ + /* Load the holding buffer */ + buf_hold = *--i_src >> 8; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 24 | value; + buf_hold = value >> 8; + } +#else + /* Load the holding buffer */ + buf_hold = (*--i_src & 0xFFFFFF) << 8; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold | + ((value & 0xFF000000) >> 24); + buf_hold = (value & 0xFFFFFF) << 8; + } +#endif + /* Realign the source */ + src = (const void *)i_src; + src += 3; + break; + } + dst = (void *)i_dst; + } + + /* simple fast copy, ... unless a cache boundary is crossed */ + /* Finish off any remaining bytes */ + switch (c) { + case 4: + *--dst = *--src; + case 3: + *--dst = *--src; + case 2: + *--dst = *--src; + case 1: + *--dst = *--src; + } + return v_dst; +} +#endif /* CONFIG_OPT_LIB_FUNCTION */ +EXPORT_SYMBOL(memmove); +#endif /* __HAVE_ARCH_MEMMOVE */ diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c new file mode 100644 index 000000000..04ea72c8a --- /dev/null +++ b/arch/microblaze/lib/memset.c @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memset on Microblaze + * This is generic C code to do efficient, alignment-aware memcpy. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccessfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. 
+ * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. + */ + +#include <linux/export.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/compiler.h> +#include <linux/string.h> + +#ifdef __HAVE_ARCH_MEMSET +#ifndef CONFIG_OPT_LIB_FUNCTION +void *memset(void *v_src, int c, __kernel_size_t n) +{ + char *src = v_src; + + /* Truncate c to 8 bits */ + c = (c & 0xFF); + + /* Simple, byte oriented memset or the rest of count. */ + while (n--) + *src++ = c; + + return v_src; +} +#else /* CONFIG_OPT_LIB_FUNCTION */ +void *memset(void *v_src, int c, __kernel_size_t n) +{ + char *src = v_src; + uint32_t *i_src; + uint32_t w32 = 0; + + /* Truncate c to 8 bits */ + c = (c & 0xFF); + + if (unlikely(c)) { + /* Make a repeating word out of it */ + w32 = c; + w32 |= w32 << 8; + w32 |= w32 << 16; + } + + if (likely(n >= 4)) { + /* Align the destination to a word boundary */ + /* This is done in an endian independent manner */ + switch ((unsigned) src & 3) { + case 1: + *src++ = c; + --n; + case 2: + *src++ = c; + --n; + case 3: + *src++ = c; + --n; + } + + i_src = (void *)src; + + /* Do as many full-word copies as we can */ + for (; n >= 4; n -= 4) + *i_src++ = w32; + + src = (void *)i_src; + } + + /* Simple, byte oriented memset or the rest of count. */ + while (n--) + *src++ = c; + + return v_src; +} +#endif /* CONFIG_OPT_LIB_FUNCTION */ +EXPORT_SYMBOL(memset); +#endif /* __HAVE_ARCH_MEMSET */ diff --git a/arch/microblaze/lib/modsi3.S b/arch/microblaze/lib/modsi3.S new file mode 100644 index 000000000..64154865c --- /dev/null +++ b/arch/microblaze/lib/modsi3.S @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + +/* +* modulo operation for 32 bit integers. 
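+* Implemented as a classic 32-step restoring divider: both operands are
+* first made positive (remembering the sign of op1, which alone fixes
+* the sign of a C remainder), leading zero bits of op1 are skipped, and
+* then each step shifts one bit into a remainder accumulator and
+* subtracts op2 whenever the accumulator is large enough.  A C sketch
+* of the core loop, with illustrative names (u, v are the operands made
+* positive, held as unsigned):
+*
+*	unsigned int mod = 0, div = 0;
+*	int i;
+*	for (i = 0; i < 32; i++) {
+*		mod = mod << 1 | u >> 31;
+*		u <<= 1;
+*		if (mod >= v) {
+*			mod -= v;
+*			div = div << 1 | 1;
+*		} else {
+*			div <<= 1;
+*		}
+*	}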
+* Input : op1 in Reg r5 +* op2 in Reg r6 +* Output: op1 mod op2 in Reg r3 +*/ + + .text + .globl __modsi3 + .type __modsi3, @function + .ent __modsi3 + +__modsi3: + .frame r1, 0, r15 + + addik r1, r1, -16 + swi r28, r1, 0 + swi r29, r1, 4 + swi r30, r1, 8 + swi r31, r1, 12 + + beqi r6, div_by_zero /* div_by_zero division error */ + beqi r5, result_is_zero /* result is zero */ + bgeid r5, r5_pos + /* get the sign of the result [ depends only on the first arg] */ + add r28, r5, r0 + rsubi r5, r5, 0 /* make r5 positive */ +r5_pos: + bgei r6, r6_pos + rsubi r6, r6, 0 /* make r6 positive */ +r6_pos: + addik r3, r0, 0 /* clear mod */ + addik r30, r0, 0 /* clear div */ + addik r29, r0, 32 /* initialize the loop count */ +/* first part try to find the first '1' in the r5 */ +div1: + add r5, r5, r5 /* left shift logical r5 */ + bgeid r5, div1 + addik r29, r29, -1 +div2: + /* left shift logical r5 get the '1' into the carry */ + add r5, r5, r5 + addc r3, r3, r3 /* move that bit into the mod register */ + rsub r31, r6, r3 /* try to subtract (r30 a r6) */ + blti r31, mod_too_small + /* move the r31 to mod since the result was positive */ + or r3, r0, r31 + addik r30, r30, 1 +mod_too_small: + addik r29, r29, -1 + beqi r29, loop_end + add r30, r30, r30 /* shift in the '1' into div */ + bri div2 /* div2 */ +loop_end: + bgei r28, return_here + brid return_here + rsubi r3, r3, 0 /* negate the result */ +div_by_zero: +result_is_zero: + or r3, r0, r0 /* set result to 0 [both mod as well as div are 0] */ +return_here: +/* restore values of csrs and that of r3 and the divisor and the dividend */ + lwi r28, r1, 0 + lwi r29, r1, 4 + lwi r30, r1, 8 + lwi r31, r1, 12 + rtsd r15, 8 + addik r1, r1, 16 + +.size __modsi3, . - __modsi3 +.end __modsi3 diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c new file mode 100644 index 000000000..8e6bc17e2 --- /dev/null +++ b/arch/microblaze/lib/muldi3.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +#define W_TYPE_SIZE 32 + +#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1)) +#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2)) + +/* If we still don't have umul_ppmm, define it using plain C. */ +#if !defined(umul_ppmm) +#define umul_ppmm(w1, w0, u, v) \ + do { \ + unsigned long __x0, __x1, __x2, __x3; \ + unsigned short __ul, __vl, __uh, __vh; \ + \ + __ul = __ll_lowpart(u); \ + __uh = __ll_highpart(u); \ + __vl = __ll_lowpart(v); \ + __vh = __ll_highpart(v); \ + \ + __x0 = (unsigned long) __ul * __vl; \ + __x1 = (unsigned long) __ul * __vh; \ + __x2 = (unsigned long) __uh * __vl; \ + __x3 = (unsigned long) __uh * __vh; \ + \ + __x1 += __ll_highpart(__x0); /* this can't give carry */\ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? 
*/ \ + __x3 += __ll_B; /* yes, add it in the proper pos */ \ + \ + (w1) = __x3 + __ll_highpart(__x1); \ + (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\ + } while (0) +#endif + +#if !defined(__umulsidi3) +#define __umulsidi3(u, v) ({ \ + DWunion __w; \ + umul_ppmm(__w.s.high, __w.s.low, u, v); \ + __w.ll; \ + }) +#endif + +long long __muldi3(long long u, long long v) +{ + const DWunion uu = {.ll = u}; + const DWunion vv = {.ll = v}; + DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)}; + + w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high + + (unsigned long) uu.s.high * (unsigned long) vv.s.low); + + return w.ll; +} +EXPORT_SYMBOL(__muldi3); diff --git a/arch/microblaze/lib/mulsi3.S b/arch/microblaze/lib/mulsi3.S new file mode 100644 index 000000000..d6703a4d9 --- /dev/null +++ b/arch/microblaze/lib/mulsi3.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + +/* + * Multiply operation for 32 bit integers. + * Input : Operand1 in Reg r5 + * Operand2 in Reg r6 + * Output: Result [op1 * op2] in Reg r3 + */ + .text + .globl __mulsi3 + .type __mulsi3, @function + .ent __mulsi3 + +__mulsi3: + .frame r1, 0, r15 + add r3, r0, r0 + beqi r5, result_is_zero /* multiply by zero */ + beqi r6, result_is_zero /* multiply by zero */ + bgeid r5, r5_pos + xor r4, r5, r6 /* get the sign of the result */ + rsubi r5, r5, 0 /* make r5 positive */ +r5_pos: + bgei r6, r6_pos + rsubi r6, r6, 0 /* make r6 positive */ +r6_pos: + bri l1 +l2: + add r5, r5, r5 +l1: + srl r6, r6 + addc r7, r0, r0 + beqi r7, l2 + bneid r6, l2 + add r3, r3, r5 + blti r4, negateresult + rtsd r15, 8 + nop +negateresult: + rtsd r15, 8 + rsub r3, r3, r0 +result_is_zero: + rtsd r15, 8 + addi r3, r0, 0 + +.size __mulsi3, . - __mulsi3 +.end __mulsi3 diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S new file mode 100644 index 000000000..0e8cc2710 --- /dev/null +++ b/arch/microblaze/lib/uaccess_old.S @@ -0,0 +1,266 @@ +/* + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * Copyright (C) 2007 LynuxWorks, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/errno.h> +#include <linux/linkage.h> +#include <asm/page.h> + +/* + * int __strncpy_user(char *to, char *from, int len); + * + * Returns: + * -EFAULT for an exception + * len if we hit the buffer limit + * bytes copied + */ + + .text +.globl __strncpy_user; +.type __strncpy_user, @function +.align 4; +__strncpy_user: + + /* + * r5 - to + * r6 - from + * r7 - len + * r3 - temp count + * r4 - temp val + */ + beqid r7,3f + addik r3,r7,0 /* temp_count = len */ +1: + lbu r4,r6,r0 + beqid r4,2f + sb r4,r5,r0 + + addik r5,r5,1 + addik r6,r6,1 /* delay slot */ + + addik r3,r3,-1 + bnei r3,1b /* break on len */ +2: + rsubk r3,r3,r7 /* temp_count = len - temp_count */ +3: + rtsd r15,8 + nop + .size __strncpy_user, . 
- __strncpy_user + + .section .fixup, "ax" + .align 2 +4: + brid 3b + addik r3,r0, -EFAULT + + .section __ex_table, "a" + .word 1b,4b + +/* + * int __strnlen_user(char __user *str, int maxlen); + * + * Returns: + * 0 on error + * maxlen + 1 if no NUL byte found within maxlen bytes + * size of the string (including NUL byte) + */ + + .text +.globl __strnlen_user; +.type __strnlen_user, @function +.align 4; +__strnlen_user: + beqid r6,3f + addik r3,r6,0 +1: + lbu r4,r5,r0 + beqid r4,2f /* break on NUL */ + addik r3,r3,-1 /* delay slot */ + + bneid r3,1b + addik r5,r5,1 /* delay slot */ + + addik r3,r3,-1 /* for break on len */ +2: + rsubk r3,r3,r6 +3: + rtsd r15,8 + nop + .size __strnlen_user, . - __strnlen_user + + .section .fixup,"ax" +4: + brid 3b + addk r3,r0,r0 + + .section __ex_table,"a" + .word 1b,4b + +/* Loop unrolling for __copy_tofrom_user */ +#define COPY(offset) \ +1: lwi r4 , r6, 0x0000 + offset; \ +2: lwi r19, r6, 0x0004 + offset; \ +3: lwi r20, r6, 0x0008 + offset; \ +4: lwi r21, r6, 0x000C + offset; \ +5: lwi r22, r6, 0x0010 + offset; \ +6: lwi r23, r6, 0x0014 + offset; \ +7: lwi r24, r6, 0x0018 + offset; \ +8: lwi r25, r6, 0x001C + offset; \ +9: swi r4 , r5, 0x0000 + offset; \ +10: swi r19, r5, 0x0004 + offset; \ +11: swi r20, r5, 0x0008 + offset; \ +12: swi r21, r5, 0x000C + offset; \ +13: swi r22, r5, 0x0010 + offset; \ +14: swi r23, r5, 0x0014 + offset; \ +15: swi r24, r5, 0x0018 + offset; \ +16: swi r25, r5, 0x001C + offset; \ + .section __ex_table,"a"; \ + .word 1b, 33f; \ + .word 2b, 33f; \ + .word 3b, 33f; \ + .word 4b, 33f; \ + .word 5b, 33f; \ + .word 6b, 33f; \ + .word 7b, 33f; \ + .word 8b, 33f; \ + .word 9b, 33f; \ + .word 10b, 33f; \ + .word 11b, 33f; \ + .word 12b, 33f; \ + .word 13b, 33f; \ + .word 14b, 33f; \ + .word 15b, 33f; \ + .word 16b, 33f; \ + .text + +#define COPY_80(offset) \ + COPY(0x00 + offset);\ + COPY(0x20 + offset);\ + COPY(0x40 + offset);\ + COPY(0x60 + offset); + +/* + * int __copy_tofrom_user(char *to, char *from, int len) + * Return: + * 0 on success + * number of not copied bytes on error + */ + .text +.globl __copy_tofrom_user; +.type __copy_tofrom_user, @function +.align 4; +__copy_tofrom_user: + /* + * r5 - to + * r6 - from + * r7, r3 - count + * r4 - tempval + */ + beqid r7, 0f /* zero size is not likely */ + or r3, r5, r6 /* find if is any to/from unaligned */ + or r3, r3, r7 /* find if count is unaligned */ + andi r3, r3, 0x3 /* mask last 3 bits */ + bneid r3, bu1 /* if r3 is not zero then byte copying */ + or r3, r0, r0 + + rsubi r3, r7, PAGE_SIZE /* detect PAGE_SIZE */ + beqid r3, page; + or r3, r0, r0 + +w1: lw r4, r6, r3 /* at least one 4 byte copy */ +w2: sw r4, r5, r3 + addik r7, r7, -4 + bneid r7, w1 + addik r3, r3, 4 + addik r3, r7, 0 + rtsd r15, 8 + nop + + .section __ex_table,"a" + .word w1, 0f; + .word w2, 0f; + .text + +.align 4 /* Alignment is important to keep icache happy */ +page: /* Create room on stack and save registers for storign values */ + addik r1, r1, -40 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r19, r1, 12 + swi r20, r1, 16 + swi r21, r1, 20 + swi r22, r1, 24 + swi r23, r1, 28 + swi r24, r1, 32 + swi r25, r1, 36 +loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */ + /* Loop unrolling to get performance boost */ + COPY_80(0x000); + COPY_80(0x080); + COPY_80(0x100); + COPY_80(0x180); + /* copy loop */ + addik r6, r6, 0x200 + addik r7, r7, -0x200 + bneid r7, loop + addik r5, r5, 0x200 + + /* Restore register content */ + lwi r5, r1, 0 + lwi r6, r1, 4 + lwi r7, r1, 8 + lwi 
r19, r1, 12
+	lwi	r20, r1, 16
+	lwi	r21, r1, 20
+	lwi	r22, r1, 24
+	lwi	r23, r1, 28
+	lwi	r24, r1, 32
+	lwi	r25, r1, 36
+	addik	r1, r1, 40
+	/* return back */
+	addik	r3, r0, 0
+	rtsd	r15, 8
+	nop
+
+/* Fault case - return temp count */
+33:
+	addik	r3, r7, 0
+	/* Restore register content */
+	lwi	r5, r1, 0
+	lwi	r6, r1, 4
+	lwi	r7, r1, 8
+	lwi	r19, r1, 12
+	lwi	r20, r1, 16
+	lwi	r21, r1, 20
+	lwi	r22, r1, 24
+	lwi	r23, r1, 28
+	lwi	r24, r1, 32
+	lwi	r25, r1, 36
+	addik	r1, r1, 40
+	/* return back */
+	rtsd	r15, 8
+	nop
+
+.align 4 /* Alignment is important to keep icache happy */
+bu1:	lbu	r4,r6,r3
+bu2:	sb	r4,r5,r3
+	addik	r7,r7,-1
+	bneid	r7,bu1
+	addik	r3,r3,1 /* delay slot */
+0:
+	addik	r3,r7,0
+	rtsd	r15,8
+	nop
+	.size __copy_tofrom_user, . - __copy_tofrom_user
+
+	.section	__ex_table,"a"
+	.word	bu1, 0b;
+	.word	bu2, 0b;
+	.text diff --git a/arch/microblaze/lib/ucmpdi2.c b/arch/microblaze/lib/ucmpdi2.c new file mode 100644 index 000000000..c60e068ff --- /dev/null +++ b/arch/microblaze/lib/ucmpdi2.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h>
+
+#include "libgcc.h"
+
+word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	const DWunion au = {.ll = a};
+	const DWunion bu = {.ll = b};
+
+	if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
+		return 0;
+	else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
+		return 2;
+	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+		return 0;
+	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+		return 2;
+	return 1;
+}
+EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/microblaze/lib/udivsi3.S b/arch/microblaze/lib/udivsi3.S new file mode 100644 index 000000000..acdc66723 --- /dev/null +++ b/arch/microblaze/lib/udivsi3.S @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h>
+
+/*
+* Unsigned divide operation.
+*	Input :	Dividend in Reg r5
+*		Divisor in Reg r6
+*	Output: Result in Reg r3
+*/
+
+	.text
+	.globl	__udivsi3
+	.type __udivsi3, @function
+	.ent __udivsi3
+
+__udivsi3:
+
+	.frame	r1, 0, r15
+
+	addik	r1, r1, -12
+	swi	r29, r1, 0
+	swi	r30, r1, 4
+	swi	r31, r1, 8
+
+	beqi	r6, div_by_zero /* div_by_zero - division error */
+	beqid	r5, result_is_zero /* result is zero */
+	addik	r30, r0, 0 /* clear mod */
+	addik	r29, r0, 32 /* initialize the loop count */
+
+/* check if r6 and r5 are equal - if yes, return 1 */
+	rsub	r18, r5, r6
+	beqid	r18, return_here
+	addik	r3, r0, 1
+
+/* check if (uns)r6 is greater than (uns)r5.
in that case, just return 0 */
+	xor	r18, r5, r6
+	bgeid	r18, 16
+	add	r3, r0, r0 /* we would anyways clear r3 */
+	blti	r6, return_here /* r6[bit 31 = 1] hence is greater */
+	bri	checkr6
+	rsub	r18, r6, r5 /* microblazecmp */
+	blti	r18, return_here
+
+/* if r6 [bit 31] is set, then return result as 1 */
+checkr6:
+	bgti	r6, div0
+	brid	return_here
+	addik	r3, r0, 1
+
+/* first part: try to find the first '1' in the r5 */
+div0:
+	blti	r5, div2
+div1:
+	add	r5, r5, r5 /* left shift logical r5 */
+	bgtid	r5, div1
+	addik	r29, r29, -1
+div2:
+/* left shift logical r5 to get the '1' into the carry */
+	add	r5, r5, r5
+	addc	r30, r30, r30 /* move that bit into the mod register */
+	rsub	r31, r6, r30 /* try to subtract (r30 - r6) */
+	blti	r31, mod_too_small
+/* move the r31 to mod since the result was positive */
+	or	r30, r0, r31
+	addik	r3, r3, 1
+mod_too_small:
+	addik	r29, r29, -1
+	beqi	r29, loop_end
+	add	r3, r3, r3 /* shift in the '1' into div */
+	bri	div2 /* div2 */
+loop_end:
+	bri	return_here
+div_by_zero:
+result_is_zero:
+	or	r3, r0, r0 /* set result to 0 */
+return_here:
+/* restore values of csrs and that of r3 and the divisor and the dividend */
+	lwi	r29, r1, 0
+	lwi	r30, r1, 4
+	lwi	r31, r1, 8
+	rtsd	r15, 8
+	addik	r1, r1, 12
+
+.size __udivsi3, . - __udivsi3
+.end __udivsi3 diff --git a/arch/microblaze/lib/umodsi3.S b/arch/microblaze/lib/umodsi3.S new file mode 100644 index 000000000..f4b814e88 --- /dev/null +++ b/arch/microblaze/lib/umodsi3.S @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h>
+
+/*
+ * Unsigned modulo operation for 32 bit integers.
+ * Input :	op1 in Reg r5
+ *		op2 in Reg r6
+ * Output:	op1 mod op2 in Reg r3
+ */
+
+	.text
+	.globl	__umodsi3
+	.type __umodsi3, @function
+	.ent __umodsi3
+
+__umodsi3:
+	.frame	r1, 0, r15
+
+	addik	r1, r1, -12
+	swi	r29, r1, 0
+	swi	r30, r1, 4
+	swi	r31, r1, 8
+
+	beqi	r6, div_by_zero /* div_by_zero - division error */
+	beqid	r5, result_is_zero /* result is zero */
+	addik	r3, r0, 0 /* clear div */
+	addik	r30, r0, 0 /* clear mod */
+	addik	r29, r0, 32 /* initialize the loop count */
+
+/* check if r6 and r5 are equal - if yes, return 0 */
+	rsub	r18, r5, r6
+	beqi	r18, return_here
+
+/* check if (uns)r6 is greater than (uns)r5.
in that case, just return r5 */ + xor r18, r5, r6 + bgeid r18, 16 + addik r3, r5, 0 + blti r6, return_here + bri $lcheckr6 + rsub r18, r5, r6 /* microblazecmp */ + bgti r18, return_here + +/* if r6 [bit 31] is set, then return result as r5-r6 */ +$lcheckr6: + bgtid r6, div0 + addik r3, r0, 0 + addik r18, r0, 0x7fffffff + and r5, r5, r18 + and r6, r6, r18 + brid return_here + rsub r3, r6, r5 +/* first part: try to find the first '1' in the r5 */ +div0: + blti r5, div2 +div1: + add r5, r5, r5 /* left shift logical r5 */ + bgeid r5, div1 + addik r29, r29, -1 +div2: + /* left shift logical r5 get the '1' into the carry */ + add r5, r5, r5 + addc r3, r3, r3 /* move that bit into the mod register */ + rsub r31, r6, r3 /* try to subtract (r3 a r6) */ + blti r31, mod_too_small + /* move the r31 to mod since the result was positive */ + or r3, r0, r31 + addik r30, r30, 1 +mod_too_small: + addik r29, r29, -1 + beqi r29, loop_end + add r30, r30, r30 /* shift in the '1' into div */ + bri div2 /* div2 */ +loop_end: + bri return_here +div_by_zero: +result_is_zero: + or r3, r0, r0 /* set result to 0 */ +return_here: +/* restore values of csrs and that of r3 and the divisor and the dividend */ + lwi r29, r1, 0 + lwi r30, r1, 4 + lwi r31, r1, 8 + rtsd r15, 8 + addik r1, r1, 12 + +.size __umodsi3, . - __umodsi3 +.end __umodsi3 diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile new file mode 100644 index 000000000..7313bd8ac --- /dev/null +++ b/arch/microblaze/mm/Makefile @@ -0,0 +1,8 @@ +# +# Makefile +# + +obj-y := consistent.o init.o + +obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o +obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c new file mode 100644 index 000000000..c9a278ac7 --- /dev/null +++ b/arch/microblaze/mm/consistent.c @@ -0,0 +1,222 @@ +/* + * Microblaze support for cache consistent memory. + * Copyright (C) 2010 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2010 PetaLogix + * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> + * + * Based on PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/export.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/swap.h> +#include <linux/stddef.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/bootmem.h> +#include <linux/highmem.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/gfp.h> +#include <linux/dma-noncoherent.h> + +#include <asm/pgalloc.h> +#include <linux/io.h> +#include <linux/hardirq.h> +#include <linux/mmu_context.h> +#include <asm/mmu.h> +#include <linux/uaccess.h> +#include <asm/pgtable.h> +#include <asm/cpuinfo.h> +#include <asm/tlbflush.h> + +#ifndef CONFIG_MMU +/* I have to use dcache values because I can't relate on ram size */ +# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) +#endif + +/* + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. 
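+ *
+ * (Editorial note, not part of the original source: drivers normally reach
+ * this allocator through the generic DMA API rather than directly, e.g.
+ *
+ *	dma_addr_t handle;
+ *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	// ... device and CPU share buf coherently ...
+ *	dma_free_coherent(dev, size, buf, handle);
+ *
+ * which the dma-noncoherent glue selected in Kconfig routes to the
+ * arch_dma_alloc()/arch_dma_free() pair defined in this file.)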
+ * My crufty no-MMU approach is simple. In the HW platform we can optionally + * mirror the DDR up above the processor cacheable region. So, memory accessed + * in this mirror region will not be cached. It's alloced from the same + * pool as normal memory, but the handle we return is shifted up into the + * uncached region. This will no doubt cause big problems if memory allocated + * here is not also freed properly. -- JW + */ +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + unsigned long order, vaddr; + void *ret; + unsigned int i, err = 0; + struct page *page, *end; + +#ifdef CONFIG_MMU + phys_addr_t pa; + struct vm_struct *area; + unsigned long va; +#endif + + if (in_interrupt()) + BUG(); + + /* Only allocate page size areas. */ + size = PAGE_ALIGN(size); + order = get_order(size); + + vaddr = __get_free_pages(gfp, order); + if (!vaddr) + return NULL; + + /* + * we need to ensure that there are no cachelines in use, + * or worse dirty in this area. + */ + flush_dcache_range(virt_to_phys((void *)vaddr), + virt_to_phys((void *)vaddr) + size); + +#ifndef CONFIG_MMU + ret = (void *)vaddr; + /* + * Here's the magic! Note if the uncached shadow is not implemented, + * it's up to the calling code to also test that condition and make + * other arranegments, such as manually flushing the cache and so on. + */ +# ifdef CONFIG_XILINX_UNCACHED_SHADOW + ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); +# endif + if ((unsigned int)ret > cpuinfo.dcache_base && + (unsigned int)ret < cpuinfo.dcache_high) + pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); + + /* dma_handle is same as physical (shadowed) address */ + *dma_handle = (dma_addr_t)ret; +#else + /* Allocate some common virtual space to map the new pages. */ + area = get_vm_area(size, VM_ALLOC); + if (!area) { + free_pages(vaddr, order); + return NULL; + } + va = (unsigned long) area->addr; + ret = (void *)va; + + /* This gives us the real physical address of the first page. */ + *dma_handle = pa = __virt_to_phys(vaddr); +#endif + + /* + * free wasted pages. We skip the first page since we know + * that it will have count = 1 and won't require freeing. + * We also mark the pages in use as reserved so that + * remap_page_range works. + */ + page = virt_to_page(vaddr); + end = page + (1 << order); + + split_page(page, order); + + for (i = 0; i < size && err == 0; i += PAGE_SIZE) { +#ifdef CONFIG_MMU + /* MS: This is the whole magic - use cache inhibit pages */ + err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); +#endif + + SetPageReserved(page); + page++; + } + + /* Free the otherwise unused pages. */ + while (page < end) { + __free_page(page); + page++; + } + + if (err) { + free_pages(vaddr, order); + return NULL; + } + + return ret; +} + +#ifdef CONFIG_MMU +static pte_t *consistent_virt_to_pte(void *vaddr) +{ + unsigned long addr = (unsigned long)vaddr; + + return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr); +} + +unsigned long consistent_virt_to_pfn(void *vaddr) +{ + pte_t *ptep = consistent_virt_to_pte(vaddr); + + if (pte_none(*ptep) || !pte_present(*ptep)) + return 0; + + return pte_pfn(*ptep); +} +#endif + +/* + * free page(s) as defined by the above mapping. 
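+ *
+ * (Editorial note: the vaddr/size pair passed in must describe a region
+ * returned by the matching arch_dma_alloc() call; the loops below simply
+ * walk it page by page, so partial frees are not supported.)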
+ */ +void arch_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_addr, unsigned long attrs) +{ + struct page *page; + + if (in_interrupt()) + BUG(); + + size = PAGE_ALIGN(size); + +#ifndef CONFIG_MMU + /* Clear SHADOW_MASK bit in address, and free as per usual */ +# ifdef CONFIG_XILINX_UNCACHED_SHADOW + vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); +# endif + page = virt_to_page(vaddr); + + do { + __free_reserved_page(page); + page++; + } while (size -= PAGE_SIZE); +#else + do { + pte_t *ptep = consistent_virt_to_pte(vaddr); + unsigned long pfn; + + if (!pte_none(*ptep) && pte_present(*ptep)) { + pfn = pte_pfn(*ptep); + pte_clear(&init_mm, (unsigned int)vaddr, ptep); + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + __free_reserved_page(page); + } + } + vaddr += PAGE_SIZE; + } while (size -= PAGE_SIZE); + + /* flush tlb */ + flush_tlb_all(); +#endif +} diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c new file mode 100644 index 000000000..202ad6a49 --- /dev/null +++ b/arch/microblaze/mm/fault.c @@ -0,0 +1,296 @@ +/* + * arch/microblaze/mm/fault.c + * + * Copyright (C) 2007 Xilinx, Inc. All rights reserved. + * + * Derived from "arch/ppc/mm/fault.c" + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Derived from "arch/i386/mm/fault.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * Modified by Cort Dougan and Paul Mackerras. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + */ + +#include <linux/extable.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/interrupt.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/mmu.h> +#include <linux/mmu_context.h> +#include <linux/uaccess.h> +#include <asm/exceptions.h> + +static unsigned long pte_misses; /* updated by do_page_fault() */ +static unsigned long pte_errors; /* updated by do_page_fault() */ + +/* + * Check whether the instruction at regs->pc is a store using + * an update addressing form which will update r1. + */ +static int store_updates_sp(struct pt_regs *regs) +{ + unsigned int inst; + + if (get_user(inst, (unsigned int __user *)regs->pc)) + return 0; + /* check for 1 in the rD field */ + if (((inst >> 21) & 0x1f) != 1) + return 0; + /* check for store opcodes */ + if ((inst & 0xd0000000) == 0xd0000000) + return 1; + return 0; +} + + +/* + * bad_page_fault is called when we have a bad access from the kernel. + * It is called from do_page_fault above and from some of the procedures + * in traps.c. + */ +void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) +{ + const struct exception_table_entry *fixup; +/* MS: no context */ + /* Are we prepared to handle this fault? */ + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return; + } + + /* kernel has accessed a bad area */ + die("kernel access of bad area", regs, sig); +} + +/* + * The error_code parameter is ESR for a data fault, + * 0 for an instruction fault. 
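+ *
+ * (Editorial note, not part of the original source: ESR_S is the "store"
+ * bit of the MicroBlaze Exception Status Register, which is why the body
+ * below can derive the access direction with a plain mask,
+ *
+ *	int is_write = error_code & ESR_S;
+ *
+ * and why it must force is_write to 0 for instruction-side faults, where
+ * the ESR contents are undefined.)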
+ */ +void do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + int code = SEGV_MAPERR; + int is_write = error_code & ESR_S; + vm_fault_t fault; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + + regs->ear = address; + regs->esr = error_code; + + /* On a kernel SLB miss we can only check for a valid exception entry */ + if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) { + pr_warn("kernel task_size exceed"); + _exception(SIGSEGV, regs, code, address); + } + + /* for instr TLB miss and instr storage exception ESR_S is undefined */ + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + + if (unlikely(faulthandler_disabled() || !mm)) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + + /* faulthandler_disabled() in user mode is really bad, + as is current->mm == NULL. */ + pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n", + mm); + pr_emerg("r15 = %lx MSR = %lx\n", + regs->r15, regs->msr); + die("Weird page fault", regs, SIGSEGV); + } + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the + * kernel and should generate an OOPS. Unfortunately, in the case of an + * erroneous fault occurring in a code path which already holds mmap_sem + * we will deadlock attempting to validate the fault against the + * address space. Luckily the kernel only validly references user + * space from well defined areas of code, which are listed in the + * exceptions table. + * + * As the vast majority of faults will be valid we will only perform + * the source reference check when there is a possibility of a deadlock. + * Attempt to lock the address space, if we cannot we then validate the + * source. If this is invalid we can skip the address space check, + * thus avoiding the deadlock. + */ + if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (kernel_mode(regs) && !search_exception_tables(regs->pc)) + goto bad_area_nosemaphore; + +retry: + down_read(&mm->mmap_sem); + } + + vma = find_vma(mm, address); + if (unlikely(!vma)) + goto bad_area; + + if (vma->vm_start <= address) + goto good_area; + + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) + goto bad_area; + + if (unlikely(!is_write)) + goto bad_area; + + /* + * N.B. The ABI allows programs to access up to + * a few hundred bytes below the stack pointer (TBD). + * The kernel signal delivery code writes up to about 1.5kB + * below the stack pointer (r1) before decrementing it. + * The exec code can write slightly over 640kB to the stack + * before setting the user r1. Thus we allow the stack to + * expand to 1MB without further checks. + */ + if (unlikely(address + 0x100000 < vma->vm_end)) { + + /* get user regs even if this fault is in kernel mode */ + struct pt_regs *uregs = current->thread.regs; + if (uregs == NULL) + goto bad_area; + + /* + * A user-mode access to an address a long way below + * the stack pointer is only valid if the instruction + * is one which would update the stack pointer to the + * address accessed if the instruction completed, + * i.e. either stwu rs,n(r1) or stwux rs,r1,rb + * (or the byte, halfword, float or double forms). + * + * If we don't check this then any write to the area + * between the last mapped region and the stack will + * expand the stack rather than segfaulting. 
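+ *
+ * (Editorial note: the stwu/stwux mnemonics above are PowerPC heritage; on
+ * MicroBlaze the equivalent test is store_updates_sp() earlier in this
+ * file, which accepts only a store-type instruction, i.e. one matching
+ * (inst & 0xd0000000) == 0xd0000000, whose rD field names r1.)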
+ */ + if (address + 2048 < uregs->r1 + && (kernel_mode(regs) || !store_updates_sp(regs))) + goto bad_area; + } + if (expand_stack(vma, address)) + goto bad_area; + +good_area: + code = SEGV_ACCERR; + + /* a write */ + if (unlikely(is_write)) { + if (unlikely(!(vma->vm_flags & VM_WRITE))) + goto bad_area; + flags |= FAULT_FLAG_WRITE; + /* a read */ + } else { + /* protection fault */ + if (unlikely(error_code & 0x08000000)) + goto bad_area; + if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (unlikely(fault & VM_FAULT_MAJOR)) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; + + /* + * No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } + + up_read(&mm->mmap_sem); + + /* + * keep track of tlb+htab misses that are good addrs but + * just need pte's created via handle_mm_fault() + * -- Cort + */ + pte_misses++; + return; + +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + pte_errors++; + + /* User mode accesses cause a SIGSEGV */ + if (user_mode(regs)) { + _exception(SIGSEGV, regs, code, address); + return; + } + + bad_page_fault(regs, address, SIGSEGV); + return; + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + up_read(&mm->mmap_sem); + if (!user_mode(regs)) + bad_page_fault(regs, address, SIGKILL); + else + pagefault_out_of_memory(); + return; + +do_sigbus: + up_read(&mm->mmap_sem); + if (user_mode(regs)) { + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); + return; + } + bad_page_fault(regs, address, SIGBUS); +} diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c new file mode 100644 index 000000000..d7569f77f --- /dev/null +++ b/arch/microblaze/mm/highmem.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * highmem.c: virtual kernel memory mappings for high memory + * + * PowerPC version, stolen from the i386 version. + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terrabyte physical memory. With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. + * + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> + * + * Reworked for PowerPC by various contributors. Moved from + * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. + */ + +#include <linux/export.h> +#include <linux/highmem.h> + +/* + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap + * gives a more generic (and caching) interface. But kmap_atomic can + * be used in IRQ contexts, so in some (very limited) cases we need + * it. 
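+ *
+ * (Editorial note, not part of the original source: typical usage is a
+ * short, non-sleeping critical section, e.g.
+ *
+ *	void *p = kmap_atomic(page);
+ *	memcpy(p, src, len);	// must not sleep while the mapping is live
+ *	kunmap_atomic(p);
+ *
+ * On this port kmap_atomic(page) is expected to resolve to the
+ * kmap_atomic_prot(page, kmap_prot) implementation below.)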
+ */ +#include <asm/tlbflush.h> + +void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + + unsigned long vaddr; + int idx, type; + + preempt_disable(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(kmap_pte-idx))); +#endif + set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); + local_flush_tlb_page(NULL, vaddr); + + return (void *) vaddr; +} +EXPORT_SYMBOL(kmap_atomic_prot); + +void __kunmap_atomic(void *kvaddr) +{ + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + int type; + unsigned int idx; + + if (vaddr < __fix_to_virt(FIX_KMAP_END)) { + pagefault_enable(); + preempt_enable(); + return; + } + + type = kmap_atomic_idx(); + + idx = type + KM_TYPE_NR * smp_processor_id(); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); +#endif + /* + * force other mappings to Oops if they'll try to access + * this pte without first remap it + */ + pte_clear(&init_mm, vaddr, kmap_pte-idx); + local_flush_tlb_page(NULL, vaddr); + + kmap_atomic_idx_pop(); + pagefault_enable(); + preempt_enable(); +} +EXPORT_SYMBOL(__kunmap_atomic); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c new file mode 100644 index 000000000..df6de7ccd --- /dev/null +++ b/arch/microblaze/mm/init.c @@ -0,0 +1,385 @@ +/* + * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/bootmem.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include <linux/mm.h> /* mem_init */ +#include <linux/initrd.h> +#include <linux/pagemap.h> +#include <linux/pfn.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/export.h> + +#include <asm/page.h> +#include <asm/mmu_context.h> +#include <asm/pgalloc.h> +#include <asm/sections.h> +#include <asm/tlb.h> +#include <asm/fixmap.h> + +/* Use for MMU and noMMU because of PCI generic code */ +int mem_init_done; + +#ifndef CONFIG_MMU +unsigned int __page_offset; +EXPORT_SYMBOL(__page_offset); +#endif /* CONFIG_MMU */ + +char *klimit = _end; + +/* + * Initialize the bootmem system and give it all the memory we + * have available. 
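+ *
+ * (Editorial note: despite its position, this comment describes
+ * setup_memory() further down; the declarations that follow merely export
+ * the layout it computes: memory_start, memory_size and lowmem_size.)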
+ */ +unsigned long memory_start; +EXPORT_SYMBOL(memory_start); +unsigned long memory_size; +EXPORT_SYMBOL(memory_size); +unsigned long lowmem_size; + +#ifdef CONFIG_HIGHMEM +pte_t *kmap_pte; +EXPORT_SYMBOL(kmap_pte); +pgprot_t kmap_prot; +EXPORT_SYMBOL(kmap_prot); + +static inline pte_t *virt_to_kpte(unsigned long vaddr) +{ + return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), + vaddr), vaddr); +} + +static void __init highmem_init(void) +{ + pr_debug("%x\n", (u32)PKMAP_BASE); + map_page(PKMAP_BASE, 0, 0); /* XXX gross */ + pkmap_page_table = virt_to_kpte(PKMAP_BASE); + + kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); + kmap_prot = PAGE_KERNEL; +} + +static void highmem_setup(void) +{ + unsigned long pfn; + + for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { + struct page *page = pfn_to_page(pfn); + + /* FIXME not sure about */ + if (!memblock_is_reserved(pfn << PAGE_SHIFT)) + free_highmem_page(page); + } +} +#endif /* CONFIG_HIGHMEM */ + +/* + * paging_init() sets up the page tables - in fact we've already done this. + */ +static void __init paging_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES]; +#ifdef CONFIG_MMU + int idx; + + /* Setup fixmaps */ + for (idx = 0; idx < __end_of_fixed_addresses; idx++) + clear_fixmap(idx); +#endif + + /* Clean every zones */ + memset(zones_size, 0, sizeof(zones_size)); + +#ifdef CONFIG_HIGHMEM + highmem_init(); + + zones_size[ZONE_DMA] = max_low_pfn; + zones_size[ZONE_HIGHMEM] = max_pfn; +#else + zones_size[ZONE_DMA] = max_pfn; +#endif + + /* We don't have holes in memory map */ + free_area_init_nodes(zones_size); +} + +void __init setup_memory(void) +{ + struct memblock_region *reg; + +#ifndef CONFIG_MMU + u32 kernel_align_start, kernel_align_size; + + /* Find main memory where is the kernel */ + for_each_memblock(memory, reg) { + memory_start = (u32)reg->base; + lowmem_size = reg->size; + if ((memory_start <= (u32)_text) && + ((u32)_text <= (memory_start + lowmem_size - 1))) { + memory_size = lowmem_size; + PAGE_OFFSET = memory_start; + pr_info("%s: Main mem: 0x%x, size 0x%08x\n", + __func__, (u32) memory_start, + (u32) memory_size); + break; + } + } + + if (!memory_start || !memory_size) { + panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", + __func__, (u32) memory_start, (u32) memory_size); + } + + /* reservation of region where is the kernel */ + kernel_align_start = PAGE_DOWN((u32)_text); + /* ALIGN can be remove because _end in vmlinux.lds.S is align */ + kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; + pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n", + __func__, kernel_align_start, kernel_align_start + + kernel_align_size, kernel_align_size); + memblock_reserve(kernel_align_start, kernel_align_size); +#endif + /* + * Kernel: + * start: base phys address of kernel - page align + * end: base phys address of kernel - page align + * + * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) + * max_low_pfn + * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) + */ + + /* memory start is from the kernel end (aligned) to higher addr */ + min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ + /* RAM is assumed contiguous */ + max_mapnr = memory_size >> PAGE_SHIFT; + max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; + max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; + + pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr); + pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); + pr_info("%s: max_low_pfn: %#lx\n", __func__, 
max_low_pfn); + pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); + + /* Add active regions with valid PFNs */ + for_each_memblock(memory, reg) { + unsigned long start_pfn, end_pfn; + + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); + memblock_set_node(start_pfn << PAGE_SHIFT, + (end_pfn - start_pfn) << PAGE_SHIFT, + &memblock.memory, 0); + } + + /* XXX need to clip this if using highmem? */ + sparse_memory_present_with_active_regions(0); + + paging_init(); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_reserved_area((void *)start, (void *)end, -1, "initrd"); +} +#endif + +void free_initmem(void) +{ + free_initmem_default(-1); +} + +void __init mem_init(void) +{ + high_memory = (void *)__va(memory_start + lowmem_size - 1); + + /* this will put all memory onto the freelists */ + free_all_bootmem(); +#ifdef CONFIG_HIGHMEM + highmem_setup(); +#endif + + mem_init_print_info(NULL); +#ifdef CONFIG_MMU + pr_info("Kernel virtual memory layout:\n"); + pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); +#ifdef CONFIG_HIGHMEM + pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); +#endif /* CONFIG_HIGHMEM */ + pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", + ioremap_bot, ioremap_base); + pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", + (unsigned long)VMALLOC_START, VMALLOC_END); +#endif + mem_init_done = 1; +} + +#ifndef CONFIG_MMU +int page_is_ram(unsigned long pfn) +{ + return __range_ok(pfn, 0); +} +#else +int page_is_ram(unsigned long pfn) +{ + return pfn < max_low_pfn; +} + +/* + * Check for command-line options that affect what MMU_init will do. + */ +static void mm_cmdline_setup(void) +{ + unsigned long maxmem = 0; + char *p = cmd_line; + + /* Look for mem= option on command line */ + p = strstr(cmd_line, "mem="); + if (p) { + p += 4; + maxmem = memparse(p, &p); + if (maxmem && memory_size > maxmem) { + memory_size = maxmem; + memblock.memory.regions[0].size = memory_size; + } + } +} + +/* + * MMU_init_hw does the chip-specific initialization of the MMU hardware. + */ +static void __init mmu_init_hw(void) +{ + /* + * The Zone Protection Register (ZPR) defines how protection will + * be applied to every page which is a member of a given zone. At + * present, we utilize only two of the zones. + * The zone index bits (of ZSEL) in the PTE are used for software + * indicators, except the LSB. For user access, zone 1 is used, + * for kernel access, zone 0 is used. We set all but zone 1 + * to zero, allowing only kernel access as indicated in the PTE. + * For zone 1, we set a 01 binary (a value of 10 will not work) + * to allow user access as indicated in the PTE. This also allows + * kernel access as indicated in the PTE. + */ + __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ + "mts rzpr, r11;" + : : : "r11"); +} + +/* + * MMU_init sets up the basic memory mappings for the kernel, + * including both RAM and possibly some I/O regions, + * and sets up the page tables and the MMU hardware ready to go. 
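+ *
+ * (Editorial note: in outline, mmu_init() below (1) sanity-checks the
+ * memblock layout, (2) clamps lowmem to CONFIG_LOWMEM_SIZE, (3) reserves
+ * the kernel image and any initrd, (4) programs the ZPR via mmu_init_hw(),
+ * (5) maps lowmem with mapin_ram() and (6) sets up context management and
+ * the memblock allocation limit.)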
+ */ + +/* called from head.S */ +asmlinkage void __init mmu_init(void) +{ + unsigned int kstart, ksize; + + if (!memblock.reserved.cnt) { + pr_emerg("Error memory count\n"); + machine_restart(NULL); + } + + if ((u32) memblock.memory.regions[0].size < 0x400000) { + pr_emerg("Memory must be greater than 4MB\n"); + machine_restart(NULL); + } + + if ((u32) memblock.memory.regions[0].size < kernel_tlb) { + pr_emerg("Kernel size is greater than memory node\n"); + machine_restart(NULL); + } + + /* Find main memory where the kernel is */ + memory_start = (u32) memblock.memory.regions[0].base; + lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; + + if (lowmem_size > CONFIG_LOWMEM_SIZE) { + lowmem_size = CONFIG_LOWMEM_SIZE; +#ifndef CONFIG_HIGHMEM + memory_size = lowmem_size; +#endif + } + + mm_cmdline_setup(); /* FIXME parse args from command line - not used */ + + /* + * Map out the kernel text/data/bss from the available physical + * memory. + */ + kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ + /* kernel size */ + ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); + memblock_reserve(kstart, ksize); + +#if defined(CONFIG_BLK_DEV_INITRD) + /* Remove the init RAM disk from the available memory. */ + if (initrd_start) { + unsigned long size; + size = initrd_end - initrd_start; + memblock_reserve(__virt_to_phys(initrd_start), size); + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Initialize the MMU hardware */ + mmu_init_hw(); + + /* Map in all of RAM starting at CONFIG_KERNEL_START */ + mapin_ram(); + + /* Extend vmalloc and ioremap area as big as possible */ +#ifdef CONFIG_HIGHMEM + ioremap_base = ioremap_bot = PKMAP_BASE; +#else + ioremap_base = ioremap_bot = FIXADDR_START; +#endif + + /* Initialize the context management stuff */ + mmu_context_init(); + + /* Shortly after that, the entire linear mapping will be available */ + /* This will also cause that unflatten device tree will be allocated + * inside 768MB limit */ + memblock_set_current_limit(memory_start + lowmem_size - 1); +} + +/* This is only called until mem_init is done. */ +void __init *early_get_page(void) +{ + /* + * Mem start + kernel_tlb -> here is limit + * because of mem mapping from head.S + */ + return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, + memory_start + kernel_tlb)); +} + +#endif /* CONFIG_MMU */ + +void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) +{ + void *p; + + if (mem_init_done) + p = kzalloc(size, mask); + else { + p = alloc_bootmem(size); + if (p) + memset(p, 0, size); + } + return p; +} diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c new file mode 100644 index 000000000..26ff82f4f --- /dev/null +++ b/arch/microblaze/mm/mmu_context.c @@ -0,0 +1,70 @@ +/* + * This file contains the routines for handling the MMU. + * + * Copyright (C) 2007 Xilinx, Inc. All rights reserved. + * + * Derived from arch/ppc/mm/4xx_mmu.c: + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). 
+ * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/mm.h> +#include <linux/init.h> + +#include <asm/tlbflush.h> +#include <asm/mmu_context.h> + +mm_context_t next_mmu_context; +unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; +atomic_t nr_free_contexts; +struct mm_struct *context_mm[LAST_CONTEXT+1]; + +/* + * Initialize the context management stuff. + */ +void __init mmu_context_init(void) +{ + /* + * The use of context zero is reserved for the kernel. + * This code assumes FIRST_CONTEXT < 32. + */ + context_map[0] = (1 << FIRST_CONTEXT) - 1; + next_mmu_context = FIRST_CONTEXT; + atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); +} + +/* + * Steal a context from a task that has one at the moment. + * + * This isn't an LRU system, it just frees up each context in + * turn (sort-of pseudo-random replacement :). This would be the + * place to implement an LRU scheme if anyone were motivated to do it. + */ +void steal_context(void) +{ + struct mm_struct *mm; + + /* free up context `next_mmu_context' */ + /* if we shouldn't free context 0, don't... */ + if (next_mmu_context < FIRST_CONTEXT) + next_mmu_context = FIRST_CONTEXT; + mm = context_mm[next_mmu_context]; + flush_tlb_mm(mm); + destroy_context(mm); +} diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c new file mode 100644 index 000000000..7f525962c --- /dev/null +++ b/arch/microblaze/mm/pgtable.c @@ -0,0 +1,260 @@ +/* + * This file contains the routines setting up the linux page tables. + * + * Copyright (C) 2008 Michal Simek + * Copyright (C) 2008 PetaLogix + * + * Copyright (C) 2007 Xilinx, Inc. All rights reserved. + * + * Derived from arch/ppc/mm/pgtable.c: + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/mm_types.h> + +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <linux/io.h> +#include <asm/mmu.h> +#include <asm/sections.h> +#include <asm/fixmap.h> + +unsigned long ioremap_base; +unsigned long ioremap_bot; +EXPORT_SYMBOL(ioremap_bot); + +#ifndef CONFIG_SMP +struct pgtable_cache_struct quicklists; +#endif + +static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, + unsigned long flags) +{ + unsigned long v, i; + phys_addr_t p; + int err; + + /* + * Choose an address to map it to. + * Once the vmalloc system is running, we use it. + * Before then, we use space going down from ioremap_base + * (ioremap_bot records where we're up to). 
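+ *
+ * (Editorial note, not part of the original source: a typical caller looks
+ * like
+ *
+ *	void __iomem *regs = ioremap(res->start, resource_size(res));
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(val, regs + REG_OFF);	// REG_OFF is a hypothetical offset
+ *	iounmap(regs);
+ *
+ * The exported ioremap() below is simply __ioremap() with _PAGE_NO_CACHE.)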
+ */ + p = addr & PAGE_MASK; + size = PAGE_ALIGN(addr + size) - p; + + /* + * Don't allow anybody to remap normal RAM that we're using. + * mem_init() sets high_memory so only do the check after that. + * + * However, allow remap of rootfs: TBD + */ + + if (mem_init_done && + p >= memory_start && p < virt_to_phys(high_memory) && + !(p >= __virt_to_phys((phys_addr_t)__bss_stop) && + p < __virt_to_phys((phys_addr_t)__bss_stop))) { + pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n", + (unsigned long)p, __builtin_return_address(0)); + return NULL; + } + + if (size == 0) + return NULL; + + /* + * Is it already mapped? If the whole area is mapped then we're + * done, otherwise remap it since we want to keep the virt addrs for + * each request contiguous. + * + * We make the assumption here that if the bottom and top + * of the range we want are mapped then it's mapped to the + * same virt address (and this is contiguous). + * -- Cort + */ + + if (mem_init_done) { + struct vm_struct *area; + area = get_vm_area(size, VM_IOREMAP); + if (area == NULL) + return NULL; + v = (unsigned long) area->addr; + } else { + v = (ioremap_bot -= size); + } + + if ((flags & _PAGE_PRESENT) == 0) + flags |= _PAGE_KERNEL; + if (flags & _PAGE_NO_CACHE) + flags |= _PAGE_GUARDED; + + err = 0; + for (i = 0; i < size && err == 0; i += PAGE_SIZE) + err = map_page(v + i, p + i, flags); + if (err) { + if (mem_init_done) + vfree((void *)v); + return NULL; + } + + return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK)); +} + +void __iomem *ioremap(phys_addr_t addr, unsigned long size) +{ + return __ioremap(addr, size, _PAGE_NO_CACHE); +} +EXPORT_SYMBOL(ioremap); + +void iounmap(volatile void __iomem *addr) +{ + if ((__force void *)addr > high_memory && + (unsigned long) addr < ioremap_bot) + vfree((void *) (PAGE_MASK & (unsigned long) addr)); +} +EXPORT_SYMBOL(iounmap); + + +int map_page(unsigned long va, phys_addr_t pa, int flags) +{ + pmd_t *pd; + pte_t *pg; + int err = -ENOMEM; + /* Use upper 10 bits of VA to index the first level map */ + pd = pmd_offset(pgd_offset_k(va), va); + /* Use middle 10 bits of VA to index the second-level map */ + pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ + /* pg = pte_alloc_kernel(&init_mm, pd, va); */ + + if (pg != NULL) { + err = 0; + set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, + __pgprot(flags))); + if (unlikely(mem_init_done)) + _tlbie(va); + } + return err; +} + +/* + * Map in all of physical memory starting at CONFIG_KERNEL_START. + */ +void __init mapin_ram(void) +{ + unsigned long v, p, s, f; + + v = CONFIG_KERNEL_START; + p = memory_start; + for (s = 0; s < lowmem_size; s += PAGE_SIZE) { + f = _PAGE_PRESENT | _PAGE_ACCESSED | + _PAGE_SHARED | _PAGE_HWEXEC; + if ((char *) v < _stext || (char *) v >= _etext) + f |= _PAGE_WRENABLE; + else + /* On the MicroBlaze, no user access + forces R/W kernel access */ + f |= _PAGE_USER; + map_page(v, p, f); + v += PAGE_SIZE; + p += PAGE_SIZE; + } +} + +/* is x a power of 2? */ +#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) + +/* Scan the real Linux page tables and return a PTE pointer for + * a virtual address in a context. + * Returns true (1) if PTE was found, zero otherwise. The pointer to + * the PTE pointer is unmodified if PTE is not found. 
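+ *
+ * (Editorial note: the only caller in this file is iopa() below, which
+ * uses the returned PTE to translate a virtual address to a physical one:
+ * pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK).)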
+ */ +static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int retval = 0; + + pgd = pgd_offset(mm, addr & PAGE_MASK); + if (pgd) { + pmd = pmd_offset(pgd, addr & PAGE_MASK); + if (pmd_present(*pmd)) { + pte = pte_offset_kernel(pmd, addr & PAGE_MASK); + if (pte) { + retval = 1; + *ptep = pte; + } + } + } + return retval; +} + +/* Find physical address for this virtual address. Normally used by + * I/O functions, but anyone can call it. + */ +unsigned long iopa(unsigned long addr) +{ + unsigned long pa; + + pte_t *pte; + struct mm_struct *mm; + + /* Allow mapping of user addresses (within the thread) + * for DMA if necessary. + */ + if (addr < TASK_SIZE) + mm = current->mm; + else + mm = &init_mm; + + pa = 0; + if (get_pteptr(mm, addr, &pte)) + pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); + + return pa; +} + +__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + if (mem_init_done) { + pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + } else { + pte = (pte_t *)early_get_page(); + if (pte) + clear_page(pte); + } + return pte; +} + +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) +{ + unsigned long address = __fix_to_virt(idx); + + if (idx >= __end_of_fixed_addresses) + BUG(); + + map_page(address, phys, pgprot_val(flags)); +} diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile new file mode 100644 index 000000000..107f2f55d --- /dev/null +++ b/arch/microblaze/oprofile/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# arch/microblaze/oprofile/Makefile +# + +obj-$(CONFIG_OPROFILE) += oprofile.o + +DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ + oprof.o cpu_buffer.o buffer_sync.o \ + event_buffer.o oprofile_files.o \ + oprofilefs.o oprofile_stats.o \ + timer_int.o ) + +oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c new file mode 100644 index 000000000..def17e598 --- /dev/null +++ b/arch/microblaze/oprofile/microblaze_oprofile.c @@ -0,0 +1,22 @@ +/* + * Microblaze oprofile code + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/oprofile.h> +#include <linux/init.h> + +int __init oprofile_arch_init(struct oprofile_operations *ops) +{ + return -1; +} + +void oprofile_arch_exit(void) +{ +} diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile new file mode 100644 index 000000000..d1114fbd4 --- /dev/null +++ b/arch/microblaze/pci/Makefile @@ -0,0 +1,6 @@ +# +# Makefile +# + +obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o +obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c new file mode 100644 index 000000000..24030837a --- /dev/null +++ b/arch/microblaze/pci/indirect_pci.c @@ -0,0 +1,162 @@ +/* + * Support for indirect PCI bridges. + * + * Copyright (C) 1998 Gabriel Paubert. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/init.h> + +#include <linux/io.h> +#include <asm/pci-bridge.h> + +static int +indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? + hose->self_busno : bus->number; + + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; /* Only 3 bits for function */ + + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. + */ + cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */ + switch (len) { + case 1: + *val = in_8(cfg_data); + break; + case 2: + *val = in_le16(cfg_data); + break; + default: + *val = in_le32(cfg_data); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int +indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? + hose->self_busno : bus->number; + + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; + + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* suppress setting of PCI_PRIMARY_BUS */ + if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) + if ((offset == PCI_PRIMARY_BUS) && + (bus->number == hose->first_busno)) + val &= 0xffffff00; + + /* Workaround for PCI_28 Errata in 440EPx/GRx */ + if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) && + offset == PCI_CACHE_LINE_SIZE) { + val = 0; + } + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. 
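+ *
+ * (Editorial note: adding (offset & 3) selects the byte lane within the
+ * 32-bit configuration data window, so the 8- and 16-bit cases below land
+ * on the correct sub-word of the little-endian register.)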
+ */ + cfg_data = hose->cfg_data + (offset & 3); + switch (len) { + case 1: + out_8(cfg_data, val); + break; + case 2: + out_le16(cfg_data, val); + break; + default: + out_le32(cfg_data, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops indirect_pci_ops = { + .read = indirect_read_config, + .write = indirect_write_config, +}; + +void __init +setup_indirect_pci(struct pci_controller *hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags) +{ + resource_size_t base = cfg_addr & PAGE_MASK; + void __iomem *mbase; + + mbase = ioremap(base, PAGE_SIZE); + hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK); + if ((cfg_data & PAGE_MASK) != base) + mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE); + hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK); + hose->ops = &indirect_pci_ops; + hose->indirect_type = flags; +} diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c new file mode 100644 index 000000000..bde74af4c --- /dev/null +++ b/arch/microblaze/pci/iomap.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ppc64 "iomap" interface implementation. + * + * (C) Copyright 2004 Linus Torvalds + */ +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/io.h> +#include <asm/pci-bridge.h> + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ + if (isa_vaddr_is_ioport(addr)) + return; + if (pcibios_vaddr_is_ioport(addr)) + return; + iounmap(addr); +} +EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c new file mode 100644 index 000000000..2ffd171af --- /dev/null +++ b/arch/microblaze/pci/pci-common.c @@ -0,0 +1,1128 @@ +/* + * Contains common pci routines for ALL ppc platform + * (based on pci_32.c and pci_64.c) + * + * Port for PPC64 David Engebretsen, IBM Corp. + * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. + * + * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM + * Rework, based on alpha PCI code. + * + * Common pmac/prep/chrp pci routines. -- Cort + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/mm.h> +#include <linux/shmem_fs.h> +#include <linux/list.h> +#include <linux/syscalls.h> +#include <linux/irq.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/export.h> + +#include <asm/processor.h> +#include <linux/io.h> +#include <asm/pci-bridge.h> +#include <asm/byteorder.h> + +static DEFINE_SPINLOCK(hose_spinlock); +LIST_HEAD(hose_list); + +/* XXX kill that some day ... 
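+ * (editorial note: "that" refers to the global PHB numbering just below,
+ * a scheme inherited from the powerpc code this file is based on)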
*/ +static int global_phb_number; /* Global phb counter */ + +/* ISA Memory physical address */ +resource_size_t isa_mem_base; + +unsigned long isa_io_base; +EXPORT_SYMBOL(isa_io_base); + +static int pci_bus_count; + +struct pci_controller *pcibios_alloc_controller(struct device_node *dev) +{ + struct pci_controller *phb; + + phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); + if (!phb) + return NULL; + spin_lock(&hose_spinlock); + phb->global_number = global_phb_number++; + list_add_tail(&phb->list_node, &hose_list); + spin_unlock(&hose_spinlock); + phb->dn = dev; + phb->is_dynamic = mem_init_done; + return phb; +} + +void pcibios_free_controller(struct pci_controller *phb) +{ + spin_lock(&hose_spinlock); + list_del(&phb->list_node); + spin_unlock(&hose_spinlock); + + if (phb->is_dynamic) + kfree(phb); +} + +static resource_size_t pcibios_io_size(const struct pci_controller *hose) +{ + return resource_size(&hose->io_resource); +} + +int pcibios_vaddr_is_ioport(void __iomem *address) +{ + int ret = 0; + struct pci_controller *hose; + resource_size_t size; + + spin_lock(&hose_spinlock); + list_for_each_entry(hose, &hose_list, list_node) { + size = pcibios_io_size(hose); + if (address >= hose->io_base_virt && + address < (hose->io_base_virt + size)) { + ret = 1; + break; + } + } + spin_unlock(&hose_spinlock); + return ret; +} + +unsigned long pci_address_to_pio(phys_addr_t address) +{ + struct pci_controller *hose; + resource_size_t size; + unsigned long ret = ~0; + + spin_lock(&hose_spinlock); + list_for_each_entry(hose, &hose_list, list_node) { + size = pcibios_io_size(hose); + if (address >= hose->io_base_phys && + address < (hose->io_base_phys + size)) { + unsigned long base = + (unsigned long)hose->io_base_virt - _IO_BASE; + ret = base + (address - hose->io_base_phys); + break; + } + } + spin_unlock(&hose_spinlock); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_address_to_pio); + +/* This routine is meant to be used early during boot, when the + * PCI bus numbers have not yet been assigned, and you need to + * issue PCI config cycles to an OF device. + * It could also be used to "fix" RTAS config cycles if you want + * to set pci_assign_all_buses to 1 and still use RTAS for PCI + * config cycles. + */ +struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) +{ + while (node) { + struct pci_controller *hose, *tmp; + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (hose->dn == node) + return hose; + node = node->parent; + } + return NULL; +} + +void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +/* + * Platform support for /proc/bus/pci/X/Y mmap()s. 
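+ *
+ * For illustration (hypothetical userspace, not part of this file):
+ * an ioctl(fd, PCIIOC_MMAP_IS_IO) followed by mmap() on a file such
+ * as /proc/bus/pci/00/0b.0 ends up in pci_iobar_pfn() below, which
+ * rebases vma->vm_pgoff from a bus-relative I/O offset onto the
+ * controller's physical I/O window.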
+ */ + +int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + resource_size_t ioaddr = pci_resource_start(pdev, bar); + + if (!hose) + return -EINVAL; /* should never happen */ + + /* Convert to an offset within this PCI controller */ + ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE; + + vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT; + return 0; +} + +/* + * This one is used by /dev/mem and fbdev who have no clue about the + * PCI device, it tries to find the PCI device first and calls the + * above routine + */ +pgprot_t pci_phys_mem_access_prot(struct file *file, + unsigned long pfn, + unsigned long size, + pgprot_t prot) +{ + struct pci_dev *pdev = NULL; + struct resource *found = NULL; + resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; + int i; + + if (page_is_ram(pfn)) + return prot; + + prot = pgprot_noncached(prot); + for_each_pci_dev(pdev) { + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { + struct resource *rp = &pdev->resource[i]; + int flags = rp->flags; + + /* Active and same type? */ + if ((flags & IORESOURCE_MEM) == 0) + continue; + /* In the range of this resource? */ + if (offset < (rp->start & PAGE_MASK) || + offset > rp->end) + continue; + found = rp; + break; + } + if (found) + break; + } + if (found) { + if (found->flags & IORESOURCE_PREFETCH) + prot = pgprot_noncached_wc(prot); + pci_dev_put(pdev); + } + + pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", + (unsigned long long)offset, pgprot_val(prot)); + + return prot; +} + +/* This provides legacy IO read access on a bus */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + unsigned long offset; + struct pci_controller *hose = pci_bus_to_host(bus); + struct resource *rp = &hose->io_resource; + void __iomem *addr; + + /* Check if port can be supported by that bus. We only check + * the ranges of the PHB though, not the bus itself as the rules + * for forwarding legacy cycles down bridges are not our problem + * here. So if the host bridge supports it, we do it. + */ + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + offset += port; + + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (offset < rp->start || (offset + size) > rp->end) + return -ENXIO; + addr = hose->io_base_virt + port; + + switch (size) { + case 1: + *((u8 *)val) = in_8(addr); + return 1; + case 2: + if (port & 1) + return -EINVAL; + *((u16 *)val) = in_le16(addr); + return 2; + case 4: + if (port & 3) + return -EINVAL; + *((u32 *)val) = in_le32(addr); + return 4; + } + return -EINVAL; +} + +/* This provides legacy IO write access on a bus */ +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + unsigned long offset; + struct pci_controller *hose = pci_bus_to_host(bus); + struct resource *rp = &hose->io_resource; + void __iomem *addr; + + /* Check if port can be supported by that bus. We only check + * the ranges of the PHB though, not the bus itself as the rules + * for forwarding legacy cycles down bridges are not our problem + * here. So if the host bridge supports it, we do it. + */ + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + offset += port; + + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (offset < rp->start || (offset + size) > rp->end) + return -ENXIO; + addr = hose->io_base_virt + port; + + /* WARNING: The generic code is idiotic. 
It gets passed a pointer + * to what can be a 1, 2 or 4 byte quantity and always reads that + * as a u32, which means that we have to correct the location of + * the data read within those 32 bits for size 1 and 2 + */ + switch (size) { + case 1: + out_8(addr, val >> 24); + return 1; + case 2: + if (port & 1) + return -EINVAL; + out_le16(addr, val >> 16); + return 2; + case 4: + if (port & 3) + return -EINVAL; + out_le32(addr, val); + return 4; + } + return -EINVAL; +} + +/* This provides legacy IO or memory mmap access on a bus */ +int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + resource_size_t offset = + ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; + resource_size_t size = vma->vm_end - vma->vm_start; + struct resource *rp; + + pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", + pci_domain_nr(bus), bus->number, + mmap_state == pci_mmap_mem ? "MEM" : "IO", + (unsigned long long)offset, + (unsigned long long)(offset + size - 1)); + + if (mmap_state == pci_mmap_mem) { + /* Hack alert ! + * + * Because X is lame and can fail starting if it gets an error + * trying to mmap legacy_mem (instead of just moving on without + * legacy memory access) we fake it here by giving it anonymous + * memory, effectively behaving just like /dev/zero + */ + if ((offset + size) > hose->isa_mem_size) { +#ifdef CONFIG_MMU + pr_debug("Process %s (pid:%d) mapped non-existing PCI", + current->comm, current->pid); + pr_debug("legacy memory for 0%04x:%02x\n", + pci_domain_nr(bus), bus->number); +#endif + if (vma->vm_flags & VM_SHARED) + return shmem_zero_setup(vma); + return 0; + } + offset += hose->isa_mem_phys; + } else { + unsigned long io_offset = (unsigned long)hose->io_base_virt - + _IO_BASE; + unsigned long roffset = offset + io_offset; + rp = &hose->io_resource; + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (roffset < rp->start || (roffset + size) > rp->end) + return -ENXIO; + offset += hose->io_base_phys; + } + pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); + + vma->vm_pgoff = offset >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end) +{ + struct pci_bus_region region; + + if (rsrc->flags & IORESOURCE_IO) { + pcibios_resource_to_bus(dev->bus, ®ion, + (struct resource *) rsrc); + *start = region.start; + *end = region.end; + return; + } + + /* We pass a CPU physical address to userland for MMIO instead of a + * BAR value because X is lame and expects to be able to use that + * to pass to /dev/mem! + * + * That means we may have 64-bit values where some apps only expect + * 32 (like X itself since it thinks only Sparc has 64-bit MMIO). + */ + *start = rsrc->start; + *end = rsrc->end; +} + +/** + * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree + * @hose: newly allocated pci_controller to be setup + * @dev: device node of the host bridge + * @primary: set if primary bus (32 bits only, soon to be deprecated) + * + * This function will parse the "ranges" property of a PCI host bridge device + * node and setup the resource mapping of a pci controller based on its + * content. 
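+ *
+ * For illustration, a hypothetical "ranges" entry (not from this file),
+ * in the usual 3-cell PCI address / 1-cell parent address / 2-cell size
+ * form:
+ *   ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000>;
+ * i.e. a 256MB 1:1 non-prefetchable memory window at 0x80000000, which
+ * the loop below turns into one IORESOURCE_MEM entry on the hose.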
+ *
+ * Life would be boring if it weren't for a few issues that we have to deal
+ * with here:
+ *
+ * - We can only cope with one IO space range and up to 3 Memory space
+ * ranges. However, some machines (thanks Apple !) tend to split their
+ * space into lots of small contiguous ranges. So we have to coalesce.
+ *
+ * - We can only cope with all memory ranges having the same offset
+ * between CPU addresses and PCI addresses. Unfortunately, some bridges
+ * are set up for a large 1:1 mapping along with a small "window" which
+ * maps PCI address 0 to some arbitrary high address of the CPU space in
+ * order to give access to the ISA memory hole.
+ * The way out of here that I've chosen for now is to always set the
+ * offset based on the first resource found, then override it if we
+ * have a different offset and the previous was set by an ISA hole.
+ *
+ * - Some buses have IO space not starting at 0, which causes trouble with
+ * the way we do our IO resource renumbering. The code somewhat deals with
+ * it for 64 bits but I would expect problems on 32 bits.
+ *
+ * - Some 32-bit platforms such as 4xx can have physical space larger than
+ * 32 bits, so we need to use 64-bit values for the parsing.
+ */
+void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+ struct device_node *dev, int primary)
+{
+ int memno = 0, isa_hole = -1;
+ unsigned long long isa_mb = 0;
+ struct resource *res;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+
+ pr_info("PCI host bridge %pOF %s ranges:\n",
+ dev, primary ? "(primary)" : "");
+
+ /* Check for ranges property */
+ if (of_pci_range_parser_init(&parser, dev))
+ return;
+
+ pr_debug("Parsing ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+ /* Read next ranges element */
+ pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
+ range.pci_space, range.pci_addr);
+ pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
+ range.cpu_addr, range.size);
+
+ /* Skip if we failed translation or got a zero-sized region
+ * (some firmware tries to feed us nonsensical zero-sized
+ * regions, such as power3's, which look like some kind of
+ * attempt at exposing the VGA memory hole)
+ */
+ if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
+ continue;
+
+ /* Act based on address space type */
+ res = NULL;
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.cpu_addr, range.cpu_addr + range.size - 1,
+ range.pci_addr);
+
+ /* We support only one IO range */
+ if (hose->pci_io_size) {
+ pr_info(" \\--> Skipped (too many) !\n");
+ continue;
+ }
+ /* On 32 bits, limit I/O space to 16MB */
+ if (range.size > 0x01000000)
+ range.size = 0x01000000;
+
+ /* 32 bits needs to map IOs here */
+ hose->io_base_virt = ioremap(range.cpu_addr,
+ range.size);
+
+ /* Expect trouble if pci_addr is not 0 */
+ if (primary)
+ isa_io_base =
+ (unsigned long)hose->io_base_virt;
+ /* pci_io_size and io_base_phys always represent IO
+ * space starting at 0 so we factor in pci_addr
+ */
+ hose->pci_io_size = range.pci_addr + range.size;
+ hose->io_base_phys = range.cpu_addr - range.pci_addr;
+
+ /* Build resource */
+ res = &hose->io_resource;
+ range.cpu_addr = range.pci_addr;
+
+ break;
+ case IORESOURCE_MEM:
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
+ range.cpu_addr, range.cpu_addr + range.size - 1,
+ range.pci_addr,
+ (range.pci_space & 0x40000000) ?
+ "Prefetch" : ""); + + /* We support only 3 memory ranges */ + if (memno >= 3) { + pr_info(" \\--> Skipped (too many) !\n"); + continue; + } + /* Handles ISA memory hole space here */ + if (range.pci_addr == 0) { + isa_mb = range.cpu_addr; + isa_hole = memno; + if (primary || isa_mem_base == 0) + isa_mem_base = range.cpu_addr; + hose->isa_mem_phys = range.cpu_addr; + hose->isa_mem_size = range.size; + } + + /* We get the PCI/Mem offset from the first range or + * the, current one if the offset came from an ISA + * hole. If they don't match, bugger. + */ + if (memno == 0 || + (isa_hole >= 0 && range.pci_addr != 0 && + hose->pci_mem_offset == isa_mb)) + hose->pci_mem_offset = range.cpu_addr - + range.pci_addr; + else if (range.pci_addr != 0 && + hose->pci_mem_offset != range.cpu_addr - + range.pci_addr) { + pr_info(" \\--> Skipped (offset mismatch) !\n"); + continue; + } + + /* Build resource */ + res = &hose->mem_resources[memno++]; + break; + } + if (res != NULL) { + res->name = dev->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } + } + + /* If there's an ISA hole and the pci_mem_offset is -not- matching + * the ISA hole offset, then we need to remove the ISA hole from + * the resource list for that brige + */ + if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { + unsigned int next = isa_hole + 1; + pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb); + if (next < memno) + memmove(&hose->mem_resources[isa_hole], + &hose->mem_resources[next], + sizeof(struct resource) * (memno - next)); + hose->mem_resources[--memno].flags = 0; + } +} + +/* Display the domain number in /proc */ +int pci_proc_domain(struct pci_bus *bus) +{ + return pci_domain_nr(bus); +} + +/* This header fixup will do the resource fixup for all devices as they are + * probed, but not for bridge ranges + */ +static void pcibios_fixup_resources(struct pci_dev *dev) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + int i; + + if (!hose) { + pr_err("No host bridge for PCI dev %s !\n", + pci_name(dev)); + return; + } + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + struct resource *res = dev->resource + i; + if (!res->flags) + continue; + if (res->start == 0) { + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]", + pci_name(dev), i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags); + pr_debug("is unassigned\n"); + res->end -= res->start; + res->start = 0; + res->flags |= IORESOURCE_UNSET; + continue; + } + + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", + pci_name(dev), i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); + +int pcibios_add_device(struct pci_dev *dev) +{ + dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); + + return 0; +} +EXPORT_SYMBOL(pcibios_add_device); + +/* + * Reparent resource children of pr that conflict with res + * under res, and make res replace those children. 
+ */ +static int __init reparent_resources(struct resource *parent, + struct resource *res) +{ + struct resource *p, **pp; + struct resource **firstpp = NULL; + + for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { + if (p->end < res->start) + continue; + if (res->end < p->start) + break; + if (p->start < res->start || p->end > res->end) + return -1; /* not completely contained */ + if (firstpp == NULL) + firstpp = pp; + } + if (firstpp == NULL) + return -1; /* didn't find any conflicting entries? */ + res->parent = parent; + res->child = *firstpp; + res->sibling = *pp; + *firstpp = res; + *pp = NULL; + for (p = res->child; p != NULL; p = p->sibling) { + p->parent = res; + pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", + p->name, + (unsigned long long)p->start, + (unsigned long long)p->end, res->name); + } + return 0; +} + +/* + * Handle resources of PCI devices. If the world were perfect, we could + * just allocate all the resource regions and do nothing more. It isn't. + * On the other hand, we cannot just re-allocate all devices, as it would + * require us to know lots of host bridge internals. So we attempt to + * keep as much of the original configuration as possible, but tweak it + * when it's found to be wrong. + * + * Known BIOS problems we have to work around: + * - I/O or memory regions not configured + * - regions configured, but not enabled in the command register + * - bogus I/O addresses above 64K used + * - expansion ROMs left enabled (this may sound harmless, but given + * the fact the PCI specs explicitly allow address decoders to be + * shared between expansion ROMs and other resource regions, it's + * at least dangerous) + * + * Our solution: + * (1) Allocate resources for all buses behind PCI-to-PCI bridges. + * This gives us fixed barriers on where we can allocate. + * (2) Allocate resources for all enabled devices. If there is + * a collision, just mark the resource as unallocated. Also + * disable expansion ROMs during this step. + * (3) Try to allocate resources for disabled devices. If the + * resources were assigned correctly, everything goes well, + * if they weren't, they won't disturb allocation of other + * resources. + * (4) Assign new addresses to resources which were either + * not configured at all or misconfigured. If explicitly + * requested by the user, configure expansion ROM address + * as well. + */ + +static void pcibios_allocate_bus_resources(struct pci_bus *bus) +{ + struct pci_bus *b; + int i; + struct resource *res, *pr; + + pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", + pci_domain_nr(bus), bus->number); + + pci_bus_for_each_resource(bus, res, i) { + if (!res || !res->flags + || res->start > res->end || res->parent) + continue; + if (bus->parent == NULL) + pr = (res->flags & IORESOURCE_IO) ? + &ioport_resource : &iomem_resource; + else { + /* Don't bother with non-root busses when + * re-assigning all resources. We clear the + * resource flags as if they were colliding + * and as such ensure proper re-allocation + * later. + */ + pr = pci_find_parent_resource(bus->self, res); + if (pr == res) { + /* this happens when the generic PCI + * code (wrongly) decides that this + * bridge is transparent -- paulus + */ + continue; + } + } + + pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ", + bus->self ? pci_name(bus->self) : "PHB", + bus->number, i, + (unsigned long long)res->start, + (unsigned long long)res->end); + pr_debug("[0x%x], parent %p (%s)\n", + (unsigned int)res->flags, + pr, (pr && pr->name) ? 
pr->name : "nil"); + + if (pr && !(pr->flags & IORESOURCE_UNSET)) { + struct pci_dev *dev = bus->self; + + if (request_resource(pr, res) == 0) + continue; + /* + * Must be a conflict with an existing entry. + * Move that entry (or entries) under the + * bridge resource and try again. + */ + if (reparent_resources(pr, res) == 0) + continue; + + if (dev && i < PCI_BRIDGE_RESOURCE_NUM && + pci_claim_bridge_resource(dev, + i + PCI_BRIDGE_RESOURCES) == 0) + continue; + + } + pr_warn("PCI: Cannot allocate resource region "); + pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number); + res->start = res->end = 0; + res->flags = 0; + } + + list_for_each_entry(b, &bus->children, node) + pcibios_allocate_bus_resources(b); +} + +static inline void alloc_resource(struct pci_dev *dev, int idx) +{ + struct resource *pr, *r = &dev->resource[idx]; + + pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), idx, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); + + pr = pci_find_parent_resource(dev, r); + if (!pr || (pr->flags & IORESOURCE_UNSET) || + request_resource(pr, r) < 0) { + pr_warn("PCI: Cannot allocate resource region %d ", idx); + pr_cont("of device %s, will remap\n", pci_name(dev)); + if (pr) + pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", + pr, + (unsigned long long)pr->start, + (unsigned long long)pr->end, + (unsigned int)pr->flags); + /* We'll assign a new address later */ + r->flags |= IORESOURCE_UNSET; + r->end -= r->start; + r->start = 0; + } +} + +static void __init pcibios_allocate_resources(int pass) +{ + struct pci_dev *dev = NULL; + int idx, disabled; + u16 command; + struct resource *r; + + for_each_pci_dev(dev) { + pci_read_config_word(dev, PCI_COMMAND, &command); + for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { + r = &dev->resource[idx]; + if (r->parent) /* Already allocated */ + continue; + if (!r->flags || (r->flags & IORESOURCE_UNSET)) + continue; /* Not assigned at all */ + /* We only allocate ROMs on pass 1 just in case they + * have been screwed up by firmware + */ + if (idx == PCI_ROM_RESOURCE) + disabled = 1; + if (r->flags & IORESOURCE_IO) + disabled = !(command & PCI_COMMAND_IO); + else + disabled = !(command & PCI_COMMAND_MEMORY); + if (pass == disabled) + alloc_resource(dev, idx); + } + if (pass) + continue; + r = &dev->resource[PCI_ROM_RESOURCE]; + if (r->flags) { + /* Turn the ROM off, leave the resource region, + * but keep it unregistered. 
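+ * (Concretely, the code below clears PCI_ROM_ADDRESS_ENABLE, bit 0 of
+ * the ROM BAR, so the ROM decoder stays disabled until a driver
+ * explicitly re-enables it.)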
+ */ + u32 reg; + pci_read_config_dword(dev, dev->rom_base_reg, ®); + if (reg & PCI_ROM_ADDRESS_ENABLE) { + pr_debug("PCI: Switching off ROM of %s\n", + pci_name(dev)); + r->flags &= ~IORESOURCE_ROM_ENABLE; + pci_write_config_dword(dev, dev->rom_base_reg, + reg & ~PCI_ROM_ADDRESS_ENABLE); + } + } + } +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + resource_size_t offset; + struct resource *res, *pres; + int i; + + pr_debug("Reserving legacy ranges for domain %04x\n", + pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_resource.flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(&hose->io_resource, res)) { + pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + + no_io: + /* Check for memory */ + offset = hose->pci_mem_offset; + pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); + for (i = 0; i < 3; i++) { + pres = &hose->mem_resources[i]; + if (!(pres->flags & IORESOURCE_MEM)) + continue; + pr_debug("hose mem res: %pR\n", pres); + if ((pres->start - offset) <= 0xa0000 && + (pres->end - offset) >= 0xbffff) + break; + } + if (i >= 3) + return; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy VGA memory"; + res->flags = IORESOURCE_MEM; + res->start = 0xa0000 + offset; + res->end = 0xbffff + offset; + pr_debug("Candidate VGA memory: %pR\n", res); + if (request_resource(pres, res)) { + pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } +} + +void __init pcibios_resource_survey(void) +{ + struct pci_bus *b; + + /* Allocate and assign resources. 
If we re-assign everything, then + * we skip the allocate phase + */ + list_for_each_entry(b, &pci_root_buses, node) + pcibios_allocate_bus_resources(b); + + pcibios_allocate_resources(0); + pcibios_allocate_resources(1); + + /* Before we start assigning unassigned resource, we try to reserve + * the low IO area and the VGA memory area if they intersect the + * bus available resources to avoid allocating things on top of them + */ + list_for_each_entry(b, &pci_root_buses, node) + pcibios_reserve_legacy_regions(b); + + /* Now proceed to assigning things that were left unassigned */ + pr_debug("PCI: Assigning unassigned resources...\n"); + pci_assign_unassigned_resources(); +} + +static void pcibios_setup_phb_resources(struct pci_controller *hose, + struct list_head *resources) +{ + unsigned long io_offset; + struct resource *res; + int i; + + /* Hookup PHB IO resource */ + res = &hose->io_resource; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + if (!res->flags) { + pr_warn("PCI: I/O resource not set for host "); + pr_cont("bridge %pOF (domain %d)\n", + hose->dn, hose->global_number); + /* Workaround for lack of IO resource only on 32-bit */ + res->start = (unsigned long)hose->io_base_virt - isa_io_base; + res->end = res->start + IO_SPACE_LIMIT; + res->flags = IORESOURCE_IO; + } + pci_add_resource_offset(resources, res, + (__force resource_size_t)(hose->io_base_virt - _IO_BASE)); + + pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + + /* Hookup PHB Memory resources */ + for (i = 0; i < 3; ++i) { + res = &hose->mem_resources[i]; + if (!res->flags) { + if (i > 0) + continue; + pr_err("PCI: Memory resource 0 not set for "); + pr_cont("host bridge %pOF (domain %d)\n", + hose->dn, hose->global_number); + + /* Workaround for lack of MEM resource only on 32-bit */ + res->start = hose->pci_mem_offset; + res->end = (resource_size_t)-1LL; + res->flags = IORESOURCE_MEM; + + } + pci_add_resource_offset(resources, res, hose->pci_mem_offset); + + pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", + i, (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + } + + pr_debug("PCI: PHB MEM offset = %016llx\n", + (unsigned long long)hose->pci_mem_offset); + pr_debug("PCI: PHB IO offset = %08lx\n", + (unsigned long)hose->io_base_virt - _IO_BASE); +} + +static void pcibios_scan_phb(struct pci_controller *hose) +{ + LIST_HEAD(resources); + struct pci_bus *bus; + struct device_node *node = hose->dn; + + pr_debug("PCI: Scanning PHB %pOF\n", node); + + pcibios_setup_phb_resources(hose, &resources); + + bus = pci_scan_root_bus(hose->parent, hose->first_busno, + hose->ops, hose, &resources); + if (bus == NULL) { + pr_err("Failed to create bus for PCI domain %04x\n", + hose->global_number); + pci_free_resource_list(&resources); + return; + } + bus->busn_res.start = hose->first_busno; + hose->bus = bus; + + hose->last_busno = bus->busn_res.end; +} + +static int __init pcibios_init(void) +{ + struct pci_controller *hose, *tmp; + int next_busno = 0; + + pr_info("PCI: Probing PCI hardware\n"); + + /* Scan all of the recorded PCI controllers. 
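+ * (Each hose has last_busno preset to 0xff before the scan and
+ * pcibios_scan_phb() then trims it to the highest bus found;
+ * next_busno tracks the first free bus number across controllers.)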
*/ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + hose->last_busno = 0xff; + pcibios_scan_phb(hose); + if (next_busno <= hose->last_busno) + next_busno = hose->last_busno + 1; + } + pci_bus_count = next_busno; + + /* Call common code to handle resource allocation */ + pcibios_resource_survey(); + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + if (hose->bus) + pci_bus_add_devices(hose->bus); + } + + return 0; +} + +subsys_initcall(pcibios_init); + +static struct pci_controller *pci_bus_to_hose(int bus) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (bus >= hose->first_busno && bus <= hose->last_busno) + return hose; + return NULL; +} + +/* Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right + * root bridge. + * Note that the returned IO or memory base is a physical address + */ + +long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) +{ + struct pci_controller *hose; + long result = -EOPNOTSUPP; + + hose = pci_bus_to_hose(bus); + if (!hose) + return -ENODEV; + + switch (which) { + case IOBASE_BRIDGE_NUMBER: + return (long)hose->first_busno; + case IOBASE_MEMORY: + return (long)hose->pci_mem_offset; + case IOBASE_IO: + return (long)hose->io_base_phys; + case IOBASE_ISA_IO: + return (long)isa_io_base; + case IOBASE_ISA_MEM: + return (long)isa_mem_base; + } + + return result; +} + +/* + * Null PCI config access functions, for the case when we can't + * find a hose. + */ +#define NULL_PCI_OP(rw, size, type) \ +static int \ +null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ +{ \ + return PCIBIOS_DEVICE_NOT_FOUND; \ +} + +static int +null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) +{ + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static int +null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) +{ + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static struct pci_ops null_pci_ops = { + .read = null_read_config, + .write = null_write_config, +}; + +/* + * These functions are used early on before PCI scanning is done + * and all of the pci_dev and pci_bus structures have been created. + */ +static struct pci_bus * +fake_pci_bus(struct pci_controller *hose, int busnr) +{ + static struct pci_bus bus; + + if (!hose) + pr_err("Can't find hose for PCI bus %d!\n", busnr); + + bus.number = busnr; + bus.sysdata = hose; + bus.ops = hose ? hose->ops : &null_pci_ops; + return &bus; +} + +#define EARLY_PCI_OP(rw, size, type) \ +int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ + int devfn, int offset, type value) \ +{ \ + return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ + devfn, offset, value); \ +} + +EARLY_PCI_OP(read, byte, u8 *) +EARLY_PCI_OP(read, word, u16 *) +EARLY_PCI_OP(read, dword, u32 *) +EARLY_PCI_OP(write, byte, u8) +EARLY_PCI_OP(write, word, u16) +EARLY_PCI_OP(write, dword, u32) + +int early_find_capability(struct pci_controller *hose, int bus, int devfn, + int cap) +{ + return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); +} + diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c new file mode 100644 index 000000000..b800909dd --- /dev/null +++ b/arch/microblaze/pci/xilinx_pci.c @@ -0,0 +1,170 @@ +/* + * PCI support for Xilinx plbv46_pci soft-core which can be used on + * Xilinx Virtex ML410 / ML510 boards. 
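+ *
+ * (The soft-core exposes indirect config access through an address
+ * register at offset 0x10c and a data register at 0x110, hooked up
+ * below via setup_indirect_pci().)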
+ *
+ * Copyright 2009 Roderick Colenbrander
+ * Copyright 2009 Secret Lab Technologies Ltd.
+ *
+ * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
+ * by Benjamin Herrenschmidt.
+ * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#define XPLB_PCI_ADDR 0x10c
+#define XPLB_PCI_DATA 0x110
+#define XPLB_PCI_BUS 0x114
+
+#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
+ PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
+
+static struct of_device_id xilinx_pci_match[] = {
+ { .compatible = "xlnx,plbv46-pci-1.03.a", },
+ {}
+};
+
+/**
+ * xilinx_pci_fixup_bridge - Hide the Xilinx PHB's own BARs from the kernel.
+ */
+static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+ int i;
+
+ if (dev->devfn || dev->bus->self)
+ return;
+
+ hose = pci_bus_to_host(dev->bus);
+ if (!hose)
+ return;
+
+ if (!of_match_node(xilinx_pci_match, hose->dn))
+ return;
+
+ /* Hide the PCI host BARs from the kernel as their content doesn't
+ * fit well into the kernel's resource management
+ */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+
+ dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
+ pci_name(dev));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
+
+#ifdef DEBUG
+/**
+ * xilinx_pci_exclude_device - Don't do config access for non-root bus
+ *
+ * This is a hack. Config access to any bus other than bus 0 does not
+ * currently work on the ML510 so we prevent it here.
+ */
+static int
+xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
+{
+ return (bus != 0);
+}
+
+/**
+ * xilinx_early_pci_scan - List pci config space for available devices
+ *
+ * List PCI devices in the very early boot phase.
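+ *
+ * Illustrative output shape (not captured from real hardware): a
+ * bus:dev:func header such as "00:01:00", then rows of four dwords
+ * ("0000: ...", "0010: ..."); an absent device reads 0xFFFFFFFF in
+ * its first dword and is reported as ABSENT.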
+ */
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
+{
+ u32 bus = 0;
+ u32 val, dev, func, offset;
+
+ /* Currently only 2 devices are connected (up to 32 are possible) */
+ for (dev = 0; dev < 2; dev++) {
+ /* List only the first function (up to 8 are possible) */
+ for (func = 0; func < 1; func++) {
+ pr_info("%02x:%02x:%02x", bus, dev, func);
+ /* Read the first 64 standardized bytes */
+ /* (up to 192 further bytes can hold the capability list) */
+ for (offset = 0; offset < 64; offset += 4) {
+ early_read_config_dword(hose, bus,
+ PCI_DEVFN(dev, func), offset, &val);
+ if (offset == 0 && val == 0xFFFFFFFF) {
+ pr_cont("\nABSENT");
+ break;
+ }
+ if (!(offset % 0x10))
+ pr_cont("\n%04x: ", offset);
+
+ pr_cont("%08x ", val);
+ }
+ pr_info("\n");
+ }
+ }
+}
+#else
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
+{
+}
+#endif
+
+/**
+ * xilinx_pci_init - Find and register a Xilinx PCI host bridge
+ */
+void __init xilinx_pci_init(void)
+{
+ struct pci_controller *hose;
+ struct resource r;
+ void __iomem *pci_reg;
+ struct device_node *pci_node;
+
+ pci_node = of_find_matching_node(NULL, xilinx_pci_match);
+ if (!pci_node)
+ return;
+
+ if (of_address_to_resource(pci_node, 0, &r)) {
+ pr_err("xilinx-pci: cannot resolve base address\n");
+ return;
+ }
+
+ hose = pcibios_alloc_controller(pci_node);
+ if (!hose) {
+ pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
+ return;
+ }
+
+ /* Set up config space */
+ setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
+ r.start + XPLB_PCI_DATA,
+ INDIRECT_TYPE_SET_CFG_TYPE);
+
+ /* According to the xilinx plbv46_pci documentation the soft-core starts
+ * a self-init when the bus master enable bit is set. Without this bit
+ * set the pci bus can't be scanned.
+ */
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
+
+ /* Set the max latency timer to 255 */
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
+
+ /* Set the max bus number to 255, and bus/subbus no's to 0 */
+ pci_reg = of_iomap(pci_node, 0);
+ WARN_ON(!pci_reg);
+ out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
+ iounmap(pci_reg);
+
+ /* Register the host bridge with the linux kernel! */
+ pci_process_bridge_OF_ranges(hose, pci_node,
+ INDIRECT_TYPE_SET_CFG_TYPE);
+
+ pr_info("xilinx-pci: Registered PCI host bridge\n");
+ xilinx_early_pci_scan(hose);
+}