From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- arch/csky/Kbuild | 6 + arch/csky/Kconfig | 357 +++++++ arch/csky/Kconfig.debug | 2 + arch/csky/Kconfig.platforms | 9 + arch/csky/Makefile | 78 ++ arch/csky/abiv1/Makefile | 6 + arch/csky/abiv1/alignment.c | 341 +++++++ arch/csky/abiv1/bswapdi.c | 12 + arch/csky/abiv1/bswapsi.c | 12 + arch/csky/abiv1/cacheflush.c | 75 ++ arch/csky/abiv1/inc/abi/cacheflush.h | 63 ++ arch/csky/abiv1/inc/abi/ckmmu.h | 101 ++ arch/csky/abiv1/inc/abi/elf.h | 26 + arch/csky/abiv1/inc/abi/entry.h | 176 ++++ arch/csky/abiv1/inc/abi/page.h | 27 + arch/csky/abiv1/inc/abi/pgtable-bits.h | 55 ++ arch/csky/abiv1/inc/abi/reg_ops.h | 26 + arch/csky/abiv1/inc/abi/regdef.h | 31 + arch/csky/abiv1/inc/abi/string.h | 15 + arch/csky/abiv1/inc/abi/switch_context.h | 16 + arch/csky/abiv1/inc/abi/vdso.h | 9 + arch/csky/abiv1/mmap.c | 71 ++ arch/csky/abiv2/Makefile | 14 + arch/csky/abiv2/cacheflush.c | 92 ++ arch/csky/abiv2/fpu.c | 270 +++++ arch/csky/abiv2/inc/abi/cacheflush.h | 60 ++ arch/csky/abiv2/inc/abi/ckmmu.h | 139 +++ arch/csky/abiv2/inc/abi/elf.h | 43 + arch/csky/abiv2/inc/abi/entry.h | 314 ++++++ arch/csky/abiv2/inc/abi/fpu.h | 66 ++ arch/csky/abiv2/inc/abi/page.h | 13 + arch/csky/abiv2/inc/abi/pgtable-bits.h | 53 + arch/csky/abiv2/inc/abi/reg_ops.h | 16 + arch/csky/abiv2/inc/abi/regdef.h | 31 + arch/csky/abiv2/inc/abi/string.h | 27 + arch/csky/abiv2/inc/abi/switch_context.h | 31 + arch/csky/abiv2/inc/abi/vdso.h | 9 + arch/csky/abiv2/mcount.S | 211 ++++ arch/csky/abiv2/memcmp.S | 152 +++ arch/csky/abiv2/memcpy.S | 104 ++ arch/csky/abiv2/memmove.S | 104 ++ arch/csky/abiv2/memset.S | 83 ++ arch/csky/abiv2/strcmp.S | 168 ++++ arch/csky/abiv2/strcpy.S | 123 +++ arch/csky/abiv2/strksyms.c | 14 + arch/csky/abiv2/strlen.S | 97 ++ arch/csky/abiv2/sysdep.h | 29 + arch/csky/boot/Makefile | 24 + arch/csky/boot/dts/Makefile | 4 + arch/csky/configs/defconfig | 53 + arch/csky/include/asm/Kbuild | 12 + arch/csky/include/asm/addrspace.h | 9 + arch/csky/include/asm/asid.h | 78 ++ arch/csky/include/asm/atomic.h | 202 ++++ arch/csky/include/asm/barrier.h | 88 ++ arch/csky/include/asm/bitops.h | 79 ++ arch/csky/include/asm/bug.h | 28 + arch/csky/include/asm/cache.h | 32 + arch/csky/include/asm/cacheflush.h | 9 + arch/csky/include/asm/checksum.h | 49 + arch/csky/include/asm/clocksource.h | 8 + arch/csky/include/asm/cmpxchg.h | 155 +++ arch/csky/include/asm/elf.h | 90 ++ arch/csky/include/asm/fixmap.h | 33 + arch/csky/include/asm/ftrace.h | 30 + arch/csky/include/asm/futex.h | 121 +++ arch/csky/include/asm/highmem.h | 44 + arch/csky/include/asm/io.h | 54 + arch/csky/include/asm/irq_work.h | 11 + arch/csky/include/asm/irqflags.h | 49 + arch/csky/include/asm/jump_label.h | 52 + arch/csky/include/asm/kprobes.h | 48 + arch/csky/include/asm/memory.h | 25 + arch/csky/include/asm/mmu.h | 12 + arch/csky/include/asm/mmu_context.h | 39 + arch/csky/include/asm/page.h | 100 ++ arch/csky/include/asm/pci.h | 15 + arch/csky/include/asm/perf_event.h | 14 + arch/csky/include/asm/pgalloc.h | 74 ++ arch/csky/include/asm/pgtable.h | 274 +++++ arch/csky/include/asm/probes.h | 24 + arch/csky/include/asm/processor.h | 87 ++ arch/csky/include/asm/ptrace.h | 102 ++ arch/csky/include/asm/reg_ops.h | 26 + arch/csky/include/asm/seccomp.h | 11 + arch/csky/include/asm/sections.h | 12 + arch/csky/include/asm/shmparam.h | 10 + arch/csky/include/asm/smp.h | 30 + 
arch/csky/include/asm/spinlock.h | 12 + arch/csky/include/asm/spinlock_types.h | 9 + arch/csky/include/asm/stackprotector.h | 21 + arch/csky/include/asm/string.h | 12 + arch/csky/include/asm/switch_to.h | 35 + arch/csky/include/asm/syscall.h | 68 ++ arch/csky/include/asm/syscalls.h | 14 + arch/csky/include/asm/tcm.h | 24 + arch/csky/include/asm/thread_info.h | 89 ++ arch/csky/include/asm/tlb.h | 9 + arch/csky/include/asm/tlbflush.h | 24 + arch/csky/include/asm/traps.h | 60 ++ arch/csky/include/asm/uaccess.h | 203 ++++ arch/csky/include/asm/unistd.h | 5 + arch/csky/include/asm/uprobes.h | 33 + arch/csky/include/asm/vdso.h | 27 + arch/csky/include/asm/vdso/clocksource.h | 9 + arch/csky/include/asm/vdso/gettimeofday.h | 114 +++ arch/csky/include/asm/vdso/processor.h | 12 + arch/csky/include/asm/vdso/vsyscall.h | 22 + arch/csky/include/asm/vmalloc.h | 4 + arch/csky/include/uapi/asm/Kbuild | 2 + arch/csky/include/uapi/asm/byteorder.h | 8 + arch/csky/include/uapi/asm/cachectl.h | 13 + arch/csky/include/uapi/asm/perf_regs.h | 50 + arch/csky/include/uapi/asm/ptrace.h | 51 + arch/csky/include/uapi/asm/sigcontext.h | 13 + arch/csky/include/uapi/asm/unistd.h | 14 + arch/csky/kernel/Makefile | 20 + arch/csky/kernel/asm-offsets.c | 83 ++ arch/csky/kernel/atomic.S | 61 ++ arch/csky/kernel/cpu-probe.c | 79 ++ arch/csky/kernel/entry.S | 274 +++++ arch/csky/kernel/ftrace.c | 234 +++++ arch/csky/kernel/head.S | 42 + arch/csky/kernel/io.c | 91 ++ arch/csky/kernel/irq.c | 17 + arch/csky/kernel/jump_label.c | 54 + arch/csky/kernel/module.c | 97 ++ arch/csky/kernel/perf_callchain.c | 114 +++ arch/csky/kernel/perf_event.c | 1371 ++++++++++++++++++++++++++ arch/csky/kernel/perf_regs.c | 39 + arch/csky/kernel/power.c | 28 + arch/csky/kernel/probes/Makefile | 7 + arch/csky/kernel/probes/decode-insn.c | 49 + arch/csky/kernel/probes/decode-insn.h | 20 + arch/csky/kernel/probes/ftrace.c | 67 ++ arch/csky/kernel/probes/kprobes.c | 412 ++++++++ arch/csky/kernel/probes/kprobes_trampoline.S | 19 + arch/csky/kernel/probes/simulate-insn.c | 390 ++++++++ arch/csky/kernel/probes/simulate-insn.h | 49 + arch/csky/kernel/probes/uprobes.c | 158 +++ arch/csky/kernel/process.c | 104 ++ arch/csky/kernel/ptrace.c | 521 ++++++++++ arch/csky/kernel/setup.c | 134 +++ arch/csky/kernel/signal.c | 267 +++++ arch/csky/kernel/smp.c | 319 ++++++ arch/csky/kernel/stacktrace.c | 156 +++ arch/csky/kernel/syscall.c | 43 + arch/csky/kernel/syscall_table.c | 14 + arch/csky/kernel/time.c | 11 + arch/csky/kernel/traps.c | 262 +++++ arch/csky/kernel/vdso.c | 107 ++ arch/csky/kernel/vdso/.gitignore | 4 + arch/csky/kernel/vdso/Makefile | 70 ++ arch/csky/kernel/vdso/note.S | 12 + arch/csky/kernel/vdso/rt_sigreturn.S | 14 + arch/csky/kernel/vdso/so2s.sh | 5 + arch/csky/kernel/vdso/vdso.S | 16 + arch/csky/kernel/vdso/vdso.lds.S | 58 ++ arch/csky/kernel/vdso/vgettimeofday.c | 39 + arch/csky/kernel/vmlinux.lds.S | 115 +++ arch/csky/lib/Makefile | 6 + arch/csky/lib/delay.c | 39 + arch/csky/lib/error-inject.c | 10 + arch/csky/lib/string.c | 134 +++ arch/csky/lib/usercopy.c | 214 ++++ arch/csky/mm/Makefile | 19 + arch/csky/mm/asid.c | 188 ++++ arch/csky/mm/cachev1.c | 136 +++ arch/csky/mm/cachev2.c | 120 +++ arch/csky/mm/context.c | 46 + arch/csky/mm/dma-mapping.c | 87 ++ arch/csky/mm/fault.c | 298 ++++++ arch/csky/mm/highmem.c | 36 + arch/csky/mm/init.c | 219 ++++ arch/csky/mm/ioremap.c | 19 + arch/csky/mm/syscache.c | 32 + arch/csky/mm/tcm.c | 169 ++++ arch/csky/mm/tlb.c | 198 ++++ 178 files changed, 14967 insertions(+) create mode 100644 arch/csky/Kbuild 
create mode 100644 arch/csky/Kconfig create mode 100644 arch/csky/Kconfig.debug create mode 100644 arch/csky/Kconfig.platforms create mode 100644 arch/csky/Makefile create mode 100644 arch/csky/abiv1/Makefile create mode 100644 arch/csky/abiv1/alignment.c create mode 100644 arch/csky/abiv1/bswapdi.c create mode 100644 arch/csky/abiv1/bswapsi.c create mode 100644 arch/csky/abiv1/cacheflush.c create mode 100644 arch/csky/abiv1/inc/abi/cacheflush.h create mode 100644 arch/csky/abiv1/inc/abi/ckmmu.h create mode 100644 arch/csky/abiv1/inc/abi/elf.h create mode 100644 arch/csky/abiv1/inc/abi/entry.h create mode 100644 arch/csky/abiv1/inc/abi/page.h create mode 100644 arch/csky/abiv1/inc/abi/pgtable-bits.h create mode 100644 arch/csky/abiv1/inc/abi/reg_ops.h create mode 100644 arch/csky/abiv1/inc/abi/regdef.h create mode 100644 arch/csky/abiv1/inc/abi/string.h create mode 100644 arch/csky/abiv1/inc/abi/switch_context.h create mode 100644 arch/csky/abiv1/inc/abi/vdso.h create mode 100644 arch/csky/abiv1/mmap.c create mode 100644 arch/csky/abiv2/Makefile create mode 100644 arch/csky/abiv2/cacheflush.c create mode 100644 arch/csky/abiv2/fpu.c create mode 100644 arch/csky/abiv2/inc/abi/cacheflush.h create mode 100644 arch/csky/abiv2/inc/abi/ckmmu.h create mode 100644 arch/csky/abiv2/inc/abi/elf.h create mode 100644 arch/csky/abiv2/inc/abi/entry.h create mode 100644 arch/csky/abiv2/inc/abi/fpu.h create mode 100644 arch/csky/abiv2/inc/abi/page.h create mode 100644 arch/csky/abiv2/inc/abi/pgtable-bits.h create mode 100644 arch/csky/abiv2/inc/abi/reg_ops.h create mode 100644 arch/csky/abiv2/inc/abi/regdef.h create mode 100644 arch/csky/abiv2/inc/abi/string.h create mode 100644 arch/csky/abiv2/inc/abi/switch_context.h create mode 100644 arch/csky/abiv2/inc/abi/vdso.h create mode 100644 arch/csky/abiv2/mcount.S create mode 100644 arch/csky/abiv2/memcmp.S create mode 100644 arch/csky/abiv2/memcpy.S create mode 100644 arch/csky/abiv2/memmove.S create mode 100644 arch/csky/abiv2/memset.S create mode 100644 arch/csky/abiv2/strcmp.S create mode 100644 arch/csky/abiv2/strcpy.S create mode 100644 arch/csky/abiv2/strksyms.c create mode 100644 arch/csky/abiv2/strlen.S create mode 100644 arch/csky/abiv2/sysdep.h create mode 100644 arch/csky/boot/Makefile create mode 100644 arch/csky/boot/dts/Makefile create mode 100644 arch/csky/configs/defconfig create mode 100644 arch/csky/include/asm/Kbuild create mode 100644 arch/csky/include/asm/addrspace.h create mode 100644 arch/csky/include/asm/asid.h create mode 100644 arch/csky/include/asm/atomic.h create mode 100644 arch/csky/include/asm/barrier.h create mode 100644 arch/csky/include/asm/bitops.h create mode 100644 arch/csky/include/asm/bug.h create mode 100644 arch/csky/include/asm/cache.h create mode 100644 arch/csky/include/asm/cacheflush.h create mode 100644 arch/csky/include/asm/checksum.h create mode 100644 arch/csky/include/asm/clocksource.h create mode 100644 arch/csky/include/asm/cmpxchg.h create mode 100644 arch/csky/include/asm/elf.h create mode 100644 arch/csky/include/asm/fixmap.h create mode 100644 arch/csky/include/asm/ftrace.h create mode 100644 arch/csky/include/asm/futex.h create mode 100644 arch/csky/include/asm/highmem.h create mode 100644 arch/csky/include/asm/io.h create mode 100644 arch/csky/include/asm/irq_work.h create mode 100644 arch/csky/include/asm/irqflags.h create mode 100644 arch/csky/include/asm/jump_label.h create mode 100644 arch/csky/include/asm/kprobes.h create mode 100644 arch/csky/include/asm/memory.h create mode 100644 
arch/csky/include/asm/mmu.h create mode 100644 arch/csky/include/asm/mmu_context.h create mode 100644 arch/csky/include/asm/page.h create mode 100644 arch/csky/include/asm/pci.h create mode 100644 arch/csky/include/asm/perf_event.h create mode 100644 arch/csky/include/asm/pgalloc.h create mode 100644 arch/csky/include/asm/pgtable.h create mode 100644 arch/csky/include/asm/probes.h create mode 100644 arch/csky/include/asm/processor.h create mode 100644 arch/csky/include/asm/ptrace.h create mode 100644 arch/csky/include/asm/reg_ops.h create mode 100644 arch/csky/include/asm/seccomp.h create mode 100644 arch/csky/include/asm/sections.h create mode 100644 arch/csky/include/asm/shmparam.h create mode 100644 arch/csky/include/asm/smp.h create mode 100644 arch/csky/include/asm/spinlock.h create mode 100644 arch/csky/include/asm/spinlock_types.h create mode 100644 arch/csky/include/asm/stackprotector.h create mode 100644 arch/csky/include/asm/string.h create mode 100644 arch/csky/include/asm/switch_to.h create mode 100644 arch/csky/include/asm/syscall.h create mode 100644 arch/csky/include/asm/syscalls.h create mode 100644 arch/csky/include/asm/tcm.h create mode 100644 arch/csky/include/asm/thread_info.h create mode 100644 arch/csky/include/asm/tlb.h create mode 100644 arch/csky/include/asm/tlbflush.h create mode 100644 arch/csky/include/asm/traps.h create mode 100644 arch/csky/include/asm/uaccess.h create mode 100644 arch/csky/include/asm/unistd.h create mode 100644 arch/csky/include/asm/uprobes.h create mode 100644 arch/csky/include/asm/vdso.h create mode 100644 arch/csky/include/asm/vdso/clocksource.h create mode 100644 arch/csky/include/asm/vdso/gettimeofday.h create mode 100644 arch/csky/include/asm/vdso/processor.h create mode 100644 arch/csky/include/asm/vdso/vsyscall.h create mode 100644 arch/csky/include/asm/vmalloc.h create mode 100644 arch/csky/include/uapi/asm/Kbuild create mode 100644 arch/csky/include/uapi/asm/byteorder.h create mode 100644 arch/csky/include/uapi/asm/cachectl.h create mode 100644 arch/csky/include/uapi/asm/perf_regs.h create mode 100644 arch/csky/include/uapi/asm/ptrace.h create mode 100644 arch/csky/include/uapi/asm/sigcontext.h create mode 100644 arch/csky/include/uapi/asm/unistd.h create mode 100644 arch/csky/kernel/Makefile create mode 100644 arch/csky/kernel/asm-offsets.c create mode 100644 arch/csky/kernel/atomic.S create mode 100644 arch/csky/kernel/cpu-probe.c create mode 100644 arch/csky/kernel/entry.S create mode 100644 arch/csky/kernel/ftrace.c create mode 100644 arch/csky/kernel/head.S create mode 100644 arch/csky/kernel/io.c create mode 100644 arch/csky/kernel/irq.c create mode 100644 arch/csky/kernel/jump_label.c create mode 100644 arch/csky/kernel/module.c create mode 100644 arch/csky/kernel/perf_callchain.c create mode 100644 arch/csky/kernel/perf_event.c create mode 100644 arch/csky/kernel/perf_regs.c create mode 100644 arch/csky/kernel/power.c create mode 100644 arch/csky/kernel/probes/Makefile create mode 100644 arch/csky/kernel/probes/decode-insn.c create mode 100644 arch/csky/kernel/probes/decode-insn.h create mode 100644 arch/csky/kernel/probes/ftrace.c create mode 100644 arch/csky/kernel/probes/kprobes.c create mode 100644 arch/csky/kernel/probes/kprobes_trampoline.S create mode 100644 arch/csky/kernel/probes/simulate-insn.c create mode 100644 arch/csky/kernel/probes/simulate-insn.h create mode 100644 arch/csky/kernel/probes/uprobes.c create mode 100644 arch/csky/kernel/process.c create mode 100644 arch/csky/kernel/ptrace.c create mode 100644 
arch/csky/kernel/setup.c
 create mode 100644 arch/csky/kernel/signal.c
 create mode 100644 arch/csky/kernel/smp.c
 create mode 100644 arch/csky/kernel/stacktrace.c
 create mode 100644 arch/csky/kernel/syscall.c
 create mode 100644 arch/csky/kernel/syscall_table.c
 create mode 100644 arch/csky/kernel/time.c
 create mode 100644 arch/csky/kernel/traps.c
 create mode 100644 arch/csky/kernel/vdso.c
 create mode 100644 arch/csky/kernel/vdso/.gitignore
 create mode 100644 arch/csky/kernel/vdso/Makefile
 create mode 100644 arch/csky/kernel/vdso/note.S
 create mode 100644 arch/csky/kernel/vdso/rt_sigreturn.S
 create mode 100755 arch/csky/kernel/vdso/so2s.sh
 create mode 100644 arch/csky/kernel/vdso/vdso.S
 create mode 100644 arch/csky/kernel/vdso/vdso.lds.S
 create mode 100644 arch/csky/kernel/vdso/vgettimeofday.c
 create mode 100644 arch/csky/kernel/vmlinux.lds.S
 create mode 100644 arch/csky/lib/Makefile
 create mode 100644 arch/csky/lib/delay.c
 create mode 100644 arch/csky/lib/error-inject.c
 create mode 100644 arch/csky/lib/string.c
 create mode 100644 arch/csky/lib/usercopy.c
 create mode 100644 arch/csky/mm/Makefile
 create mode 100644 arch/csky/mm/asid.c
 create mode 100644 arch/csky/mm/cachev1.c
 create mode 100644 arch/csky/mm/cachev2.c
 create mode 100644 arch/csky/mm/context.c
 create mode 100644 arch/csky/mm/dma-mapping.c
 create mode 100644 arch/csky/mm/fault.c
 create mode 100644 arch/csky/mm/highmem.c
 create mode 100644 arch/csky/mm/init.c
 create mode 100644 arch/csky/mm/ioremap.c
 create mode 100644 arch/csky/mm/syscache.c
 create mode 100644 arch/csky/mm/tcm.c
 create mode 100644 arch/csky/mm/tlb.c

diff --git a/arch/csky/Kbuild b/arch/csky/Kbuild
new file mode 100644
index 0000000000..0621eaea41
--- /dev/null
+++ b/arch/csky/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += kernel/ mm/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
new file mode 100644
index 0000000000..cf2a6fd7df
--- /dev/null
+++ b/arch/csky/Kconfig
@@ -0,0 +1,357 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CSKY
+	def_bool y
+	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_PREP_COHERENT
+	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_USE_BUILTIN_BSWAP
+	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_HAS_CURRENT_STACK_POINTER
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select
ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT + select COMMON_CLK + select CLKSRC_MMIO + select CSKY_MPINTC if CPU_CK860 + select CSKY_MP_TIMER if CPU_CK860 + select CSKY_APB_INTC + select DMA_DIRECT_REMAP + select IRQ_DOMAIN + select DW_APB_TIMER_OF + select GENERIC_IOREMAP + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_MULDI3 + select GENERIC_LIB_CMPDI2 + select GENERIC_LIB_UCMPDI2 + select GENERIC_ALLOCATOR + select GENERIC_ATOMIC64 + select GENERIC_CPU_DEVICES + select GENERIC_IRQ_CHIP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_MULTI_HANDLER + select GENERIC_SCHED_CLOCK + select GENERIC_SMP_IDLE_THREAD + select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_32 + select GENERIC_GETTIMEOFDAY + select GX6605S_TIMER if CPU_CK610 + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_JUMP_LABEL if !CPU_CK610 + select HAVE_ARCH_JUMP_LABEL_RELATIVE + select HAVE_ARCH_MMAP_RND_BITS + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_CONTEXT_TRACKING_USER + select HAVE_VIRT_CPU_ACCOUNTING_GEN + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_GENERIC_VDSO + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_ERROR_INJECTION + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZO + select HAVE_KERNEL_LZMA + select HAVE_KPROBES if !CPU_CK610 + select HAVE_KPROBES_ON_FTRACE if !CPU_CK610 + select HAVE_KRETPROBES if !CPU_CK610 + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_DMA_CONTIGUOUS + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_STACKPROTECTOR + select HAVE_SYSCALL_TRACEPOINTS + select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU + select LOCK_MM_AND_FIND_VMA + select MAY_HAVE_SPARSE_IRQ + select MODULES_USE_ELF_RELA if MODULES + select OF + select OF_EARLY_FLATTREE + select PERF_USE_VMALLOC if CPU_CK610 + select RTC_LIB + select TIMER_OF + select GENERIC_PCI_IOMAP + select HAVE_PCI + select PCI_DOMAINS_GENERIC if PCI + select PCI_SYSCALL if PCI + select PCI_MSI if PCI + select TRACE_IRQFLAGS_SUPPORT + +config LOCKDEP_SUPPORT + def_bool y + +config ARCH_SUPPORTS_UPROBES + def_bool y if !CPU_CK610 + +config CPU_HAS_CACHEV2 + bool + +config CPU_HAS_FPUV2 + bool + +config CPU_HAS_HILO + bool + +config CPU_HAS_TLBI + bool + +config CPU_HAS_LDSTEX + bool + help + For SMP, CPU needs "ldex&stex" instructions for atomic operations. + +config CPU_NEED_TLBSYNC + bool + +config CPU_NEED_SOFTALIGN + bool + +config CPU_NO_USER_BKPT + bool + help + For abiv2 we couldn't use "trap 1" as user space bkpt in gdbserver, because + abiv2 is 16/32bit instruction set and "trap 1" is 32bit. + So we need a 16bit instruction as user space bkpt, and it will cause an illegal + instruction exception. + In kernel we parse the *regs->pc to determine whether to send SIGTRAP or not. 
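As a concrete illustration of the CPU_NO_USER_BKPT help text above: the kernel peeks at the faulting 16-bit instruction to decide between SIGTRAP (a breakpoint gdbserver planted) and SIGILL (a genuine illegal instruction). The sketch below is only illustrative; the opcode constant USR_BKPT and both helper names are invented here, not taken from this patch.

	#include <stdint.h>
	#include <stdbool.h>

	#define USR_BKPT 0x1464u	/* hypothetical 16-bit breakpoint encoding */

	struct regs_sketch { uint32_t pc; };

	/* read_u16() stands in for a user-memory accessor such as get_user(). */
	static bool is_gdb_breakpoint(const struct regs_sketch *regs,
				      uint16_t (*read_u16)(uint32_t addr))
	{
		/* Match: the breakpoint was deliberate, report SIGTRAP.     */
		/* No match: a real illegal instruction, report SIGILL.      */
		return read_u16(regs->pc) == USR_BKPT;
	}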
+
+config GENERIC_CALIBRATE_DELAY
+	def_bool y
+
+config GENERIC_CSUM
+	def_bool y
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config MMU
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config TIME_LOW_RES
+	def_bool y
+
+config CPU_ASID_BITS
+	int
+	default "8" if (CPU_CK610 || CPU_CK807 || CPU_CK810)
+	default "12" if (CPU_CK860)
+
+config L1_CACHE_SHIFT
+	int
+	default "4" if (CPU_CK610)
+	default "5" if (CPU_CK807 || CPU_CK810)
+	default "6" if (CPU_CK860)
+
+config ARCH_MMAP_RND_BITS_MIN
+	default 8
+
+# max bits determined by the following formula:
+#  VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+	default 17
+
+menu "Processor type and features"
+
+choice
+	prompt "CPU MODEL"
+	default CPU_CK807
+
+config CPU_CK610
+	bool "CSKY CPU ck610"
+	select CPU_NEED_TLBSYNC
+	select CPU_NEED_SOFTALIGN
+	select CPU_NO_USER_BKPT
+
+config CPU_CK810
+	bool "CSKY CPU ck810"
+	select CPU_HAS_HILO
+	select CPU_NEED_TLBSYNC
+
+config CPU_CK807
+	bool "CSKY CPU ck807"
+	select CPU_HAS_HILO
+
+config CPU_CK860
+	bool "CSKY CPU ck860"
+	select CPU_HAS_TLBI
+	select CPU_HAS_CACHEV2
+	select CPU_HAS_LDSTEX
+	select CPU_HAS_FPUV2
+endchoice
+
+choice
+	prompt "PAGE OFFSET"
+	default PAGE_OFFSET_80000000
+
+config PAGE_OFFSET_80000000
+	bool "PAGE OFFSET 2G (user:kernel = 2:2)"
+
+config PAGE_OFFSET_A0000000
+	bool "PAGE OFFSET 2.5G (user:kernel = 2.5:1.5)"
+endchoice
+
+config PAGE_OFFSET
+	hex
+	default 0x80000000 if PAGE_OFFSET_80000000
+	default 0xa0000000 if PAGE_OFFSET_A0000000
+choice
+
+	prompt "C-SKY PMU type"
+	depends on PERF_EVENTS
+	depends on CPU_CK807 || CPU_CK810 || CPU_CK860
+
+config CPU_PMU_NONE
+	bool "None"
+
+config CSKY_PMU_V1
+	bool "Performance Monitoring Unit Ver.1"
+
+endchoice
+
+choice
+	prompt "Power Manager Instruction (wait/doze/stop)"
+	default CPU_PM_NONE
+
+config CPU_PM_NONE
+	bool "None"
+
+config CPU_PM_WAIT
+	bool "wait"
+
+config CPU_PM_DOZE
+	bool "doze"
+
+config CPU_PM_STOP
+	bool "stop"
+endchoice
+
+menuconfig HAVE_TCM
+	bool "Tightly-Coupled/Sram Memory"
+	depends on !COMPILE_TEST
+	help
+	  This implementation is used not only for TCM (Tightly-Coupled
+	  Memory) but also for SRAM on the SoC bus. It follows the existing
+	  Linux TCM software interface, so existing TCM application code
+	  can be reused directly.
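For readers new to the TCM interface referenced in the help text above, client code consumes it roughly as follows. This is a hedged sketch only: it assumes csky's <asm/tcm.h> mirrors the long-standing ARM-style helpers (__tcmdata, __tcmfunc, tcm_alloc/tcm_free); the actual header added by this patch is not quoted here.

	#include <asm/tcm.h>		/* assumed ARM-style TCM interface */
	#include <linux/errno.h>

	static int __tcmdata hot_counter;	/* variable placed in DTCM */

	int __tcmfunc hot_path_tick(void)	/* code placed in ITCM */
	{
		return ++hot_counter;
	}

	int tcm_scratch_demo(void)
	{
		void *buf = tcm_alloc(64);	/* runtime pool allocation */

		if (!buf)
			return -ENOMEM;
		/* ... use the low-latency buffer ... */
		tcm_free(buf, 64);
		return 0;
	}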
+ +if HAVE_TCM +config ITCM_RAM_BASE + hex "ITCM ram base" + default 0xffffffff + +config ITCM_NR_PAGES + int "Page count of ITCM size: NR*4KB" + range 1 256 + default 32 + +config HAVE_DTCM + bool "DTCM Support" + +config DTCM_RAM_BASE + hex "DTCM ram base" + depends on HAVE_DTCM + default 0xffffffff + +config DTCM_NR_PAGES + int "Page count of DTCM size: NR*4KB" + depends on HAVE_DTCM + range 1 256 + default 32 +endif + +config CPU_HAS_VDSP + bool "CPU has VDSP coprocessor" + depends on CPU_HAS_FPU && CPU_HAS_FPUV2 + +config CPU_HAS_FPU + bool "CPU has FPU coprocessor" + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + +config CPU_HAS_ICACHE_INS + bool "CPU has Icache invalidate instructions" + depends on CPU_HAS_CACHEV2 + +config CPU_HAS_TEE + bool "CPU has Trusted Execution Environment" + depends on CPU_CK810 + +config SMP + bool "Symmetric Multi-Processing (SMP) support for C-SKY" + depends on CPU_CK860 + default n + +config NR_CPUS + int "Maximum number of CPUs (2-32)" + range 2 32 + depends on SMP + default "4" + +config HIGHMEM + bool "High Memory Support" + depends on !CPU_CK610 + select KMAP_LOCAL + default y + +config DRAM_BASE + hex "DRAM start addr (the same with memory-section in dts)" + default 0x0 + +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + select GENERIC_IRQ_MIGRATION + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu/cpu1/hotplug/target. + + Say N if you want to disable CPU hotplug. + +config HAVE_EFFICIENT_UNALIGNED_STRING_OPS + bool "Enable EFFICIENT_UNALIGNED_STRING_OPS for abiv2" + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + help + Say Y here to enable EFFICIENT_UNALIGNED_STRING_OPS. Some CPU models could + deal with unaligned access by hardware. 
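To make the trade-off behind HAVE_EFFICIENT_UNALIGNED_STRING_OPS concrete: when the CPU fixes up unaligned accesses in hardware, string routines can move whole words regardless of pointer alignment. The illustrative (non-kernel) sketch below shows the idea; on an aligned-only CPU the byte-tail loop would have to be used throughout.

	#include <stddef.h>
	#include <stdint.h>

	/* Word-wise copy issuing 32-bit accesses at any alignment. Safe only
	 * where the hardware tolerates unaligned loads/stores, which is what
	 * this Kconfig option asserts; portable C would go through memcpy()
	 * instead of these casts, but this mirrors what the assembly does. */
	static void *copy_unaligned_ok(void *dst, const void *src, size_t n)
	{
		uint8_t *d = dst;
		const uint8_t *s = src;

		while (n >= 4) {	/* 4 bytes per iteration */
			*(uint32_t *)d = *(const uint32_t *)s;
			d += 4; s += 4; n -= 4;
		}
		while (n--)		/* remaining tail, byte by byte */
			*d++ = *s++;
		return dst;
	}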
+ +endmenu + +source "arch/csky/Kconfig.platforms" + +source "kernel/Kconfig.hz" diff --git a/arch/csky/Kconfig.debug b/arch/csky/Kconfig.debug new file mode 100644 index 0000000000..295942fe3f --- /dev/null +++ b/arch/csky/Kconfig.debug @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +# dummy file, do not delete diff --git a/arch/csky/Kconfig.platforms b/arch/csky/Kconfig.platforms new file mode 100644 index 0000000000..639e17f4ea --- /dev/null +++ b/arch/csky/Kconfig.platforms @@ -0,0 +1,9 @@ +menu "Platform drivers selection" + +config ARCH_CSKY_DW_APB_ICTL + bool "Select dw-apb interrupt controller" + select DW_APB_ICTL + default y + help + This enables support for snps dw-apb-ictl +endmenu diff --git a/arch/csky/Makefile b/arch/csky/Makefile new file mode 100644 index 0000000000..0e4237e557 --- /dev/null +++ b/arch/csky/Makefile @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: GPL-2.0-only +OBJCOPYFLAGS :=-O binary +GZFLAGS :=-9 + +ifdef CONFIG_CPU_HAS_FPU +FPUEXT = f +endif + +ifdef CONFIG_CPU_HAS_VDSP +VDSPEXT = v +endif + +ifdef CONFIG_CPU_HAS_TEE +TEEEXT = t +endif + +ifdef CONFIG_CPU_CK610 +CPUTYPE = ck610 +CSKYABI = abiv1 +endif + +ifdef CONFIG_CPU_CK810 +CPUTYPE = ck810 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK807 +CPUTYPE = ck807 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK860 +CPUTYPE = ck860 +CSKYABI = abiv2 +endif + +ifneq ($(CSKYABI),) +MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT) +KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR) +KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\" +KBUILD_CFLAGS += -msoft-float -mdiv +KBUILD_CFLAGS += -fno-tree-vectorize +endif + +KBUILD_CFLAGS += -pipe +ifeq ($(CSKYABI),abiv2) +KBUILD_CFLAGS += -mno-stack-size +endif + +ifdef CONFIG_FRAME_POINTER +KBUILD_CFLAGS += -mbacktrace +endif + +abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI)) +KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs)) + +KBUILD_CPPFLAGS += -mlittle-endian +LDFLAGS += -EL + +KBUILD_AFLAGS += $(KBUILD_CFLAGS) + +core-y += arch/csky/$(CSKYABI)/ + +libs-y += arch/csky/lib/ \ + $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) + +boot := arch/csky/boot + +all: zImage + +zImage Image uImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +define archhelp + echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' + echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo ' uImage - U-Boot wrapped zImage' +endef diff --git a/arch/csky/abiv1/Makefile b/arch/csky/abiv1/Makefile new file mode 100644 index 0000000000..a4b2ade0fc --- /dev/null +++ b/arch/csky/abiv1/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o +obj-y += bswapdi.o +obj-y += bswapsi.o +obj-y += cacheflush.o +obj-y += mmap.o diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c new file mode 100644 index 0000000000..b60259daed --- /dev/null +++ b/arch/csky/abiv1/alignment.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include + +static int align_kern_enable = 1; +static int align_usr_enable = 1; +static int align_kern_count = 0; +static int align_usr_count = 0; + +static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx) +{ + return rx == 15 ? 
regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx); +} + +static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val) +{ + if (rx == 15) + regs->lr = val; + else + *((uint32_t *)&(regs->a0) - 2 + rx) = val; +} + +/* + * Get byte-value from addr and set it to *valp. + * + * Success: return 0 + * Failure: return 1 + */ +static int ldb_asm(uint32_t addr, uint32_t *valp) +{ + uint32_t val; + int err; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "ldb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err), "=r"(val) + : "r" (addr) + ); + + *valp = val; + + return err; +} + +/* + * Put byte-value to addr. + * + * Success: return 0 + * Failure: return 1 + */ +static int stb_asm(uint32_t addr, uint32_t val) +{ + int err; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "stb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err) + : "r"(val), "r" (addr) + ); + + return err; +} + +/* + * Get half-word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + if (ldb_asm(addr, &byte0)) + return 1; + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + byte0 |= byte1 << 8; + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store half-word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + byte0 = byte1 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + return 0; +} + +/* + * Get word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + if (ldb_asm(addr, &byte0)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte2)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte3)) + return 1; + + byte0 |= byte1 << 8; + byte0 |= byte2 << 16; + byte0 |= byte3 << 24; + + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + addr += 1; + byte2 = (byte2 >> 16) & 0xff; + if (stb_asm(addr, byte2)) + return 1; + + addr += 1; + byte3 = (byte3 >> 24) & 0xff; + if (stb_asm(addr, byte3)) + return 1; + + return 0; +} + +extern int fixup_exception(struct pt_regs *regs); + +#define OP_LDH 0xc000 +#define OP_STH 0xd000 +#define OP_LDW 0x8000 +#define OP_STW 0x9000 + +void csky_alignment(struct pt_regs *regs) +{ + int ret; + uint16_t tmp; + uint32_t opcode = 0; + uint32_t rx = 0; + uint32_t rz = 0; + uint32_t imm = 0; + uint32_t addr = 0; + + if (!user_mode(regs)) + goto kernel_area; + + if (!align_usr_enable) { + pr_err("%s user disabled.\n", __func__); + goto bad_area; + } + + align_usr_count++; + + ret = get_user(tmp, (uint16_t *)instruction_pointer(regs)); + if 
(ret) { + pr_err("%s get_user failed.\n", __func__); + goto bad_area; + } + + goto good_area; + +kernel_area: + if (!align_kern_enable) { + pr_err("%s kernel disabled.\n", __func__); + goto bad_area; + } + + align_kern_count++; + + tmp = *(uint16_t *)instruction_pointer(regs); + +good_area: + opcode = (uint32_t)tmp; + + rx = opcode & 0xf; + imm = (opcode >> 4) & 0xf; + rz = (opcode >> 8) & 0xf; + opcode &= 0xf000; + + if (rx == 0 || rx == 1 || rz == 0 || rz == 1) + goto bad_area; + + switch (opcode) { + case OP_LDH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = ldh_c(regs, rz, addr); + break; + case OP_LDW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = ldw_c(regs, rz, addr); + break; + case OP_STH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = sth_c(regs, rz, addr); + break; + case OP_STW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = stw_c(regs, rz, addr); + break; + } + + if (ret) + goto bad_area; + + regs->pc += 2; + + return; + +bad_area: + if (!user_mode(regs)) { + if (fixup_exception(regs)) + return; + + bust_spinlocks(1); + pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n", + __func__, opcode, rz, rx, imm, addr); + show_regs(regs); + bust_spinlocks(0); + make_task_dead(SIGKILL); + } + + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); +} + +static struct ctl_table alignment_tbl[5] = { + { + .procname = "kernel_enable", + .data = &align_kern_enable, + .maxlen = sizeof(align_kern_enable), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "user_enable", + .data = &align_usr_enable, + .maxlen = sizeof(align_usr_enable), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "kernel_count", + .data = &align_kern_count, + .maxlen = sizeof(align_kern_count), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "user_count", + .data = &align_usr_count, + .maxlen = sizeof(align_usr_count), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + {} +}; + +static int __init csky_alignment_init(void) +{ + register_sysctl_init("csky/csky_alignment", alignment_tbl); + return 0; +} + +arch_initcall(csky_alignment_init); diff --git a/arch/csky/abiv1/bswapdi.c b/arch/csky/abiv1/bswapdi.c new file mode 100644 index 0000000000..f50a1d6e33 --- /dev/null +++ b/arch/csky/abiv1/bswapdi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include + +unsigned long long notrace __bswapdi2(unsigned long long u) +{ + return ___constant_swab64(u); +} +EXPORT_SYMBOL(__bswapdi2); diff --git a/arch/csky/abiv1/bswapsi.c b/arch/csky/abiv1/bswapsi.c new file mode 100644 index 0000000000..0f79182e8a --- /dev/null +++ b/arch/csky/abiv1/bswapsi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include + +unsigned int notrace __bswapsi2(unsigned int u) +{ + return ___constant_swab32(u); +} +EXPORT_SYMBOL(__bswapsi2); diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c new file mode 100644 index 0000000000..171e8fb322 --- /dev/null +++ b/arch/csky/abiv1/cacheflush.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PG_dcache_clean PG_arch_1 + +void flush_dcache_folio(struct folio *folio) +{ + struct address_space *mapping; + + if (is_zero_pfn(folio_pfn(folio))) + return; + + mapping = folio_flush_mapping(folio); + + if (mapping && !folio_mapped(folio)) + clear_bit(PG_dcache_clean, &folio->flags); + else { + dcache_wbinv_all(); + if (mapping) + icache_inv_all(); + set_bit(PG_dcache_clean, &folio->flags); + } +} +EXPORT_SYMBOL(flush_dcache_folio); + +void flush_dcache_page(struct page *page) +{ + flush_dcache_folio(page_folio(page)); +} +EXPORT_SYMBOL(flush_dcache_page); + +void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, unsigned int nr) +{ + unsigned long pfn = pte_pfn(*ptep); + struct folio *folio; + + flush_tlb_page(vma, addr); + + if (!pfn_valid(pfn)) + return; + + if (is_zero_pfn(pfn)) + return; + + folio = page_folio(pfn_to_page(pfn)); + if (!test_and_set_bit(PG_dcache_clean, &folio->flags)) + dcache_wbinv_all(); + + if (folio_flush_mapping(folio)) { + if (vma->vm_flags & VM_EXEC) + icache_inv_all(); + } +} + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + dcache_wbinv_all(); + + if (vma->vm_flags & VM_EXEC) + icache_inv_all(); +} diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h new file mode 100644 index 0000000000..908d8b0bc4 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +#include +#include +#include + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +extern void flush_dcache_page(struct page *); +void flush_dcache_folio(struct folio *); +#define flush_dcache_folio flush_dcache_folio + +#define flush_cache_mm(mm) dcache_wbinv_all() +#define flush_cache_page(vma, page, pfn) cache_wbinv_all() +#define flush_cache_dup_mm(mm) cache_wbinv_all() + +#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) +#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) + +#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + dcache_wbinv_all(); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + dcache_wbinv_all(); +} + +#define ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + if (PageAnon(page)) + cache_wbinv_all(); +} + +/* + * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken. + * Use cache_wbinv_all() here and need to be improved in future. 
+ */ +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +#define flush_cache_vmap(start, end) cache_wbinv_all() +#define flush_cache_vunmap(start, end) cache_wbinv_all() + +#define flush_icache_range(start, end) cache_wbinv_range(start, end) +#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) +#define flush_icache_deferred(mm) do {} while (0); + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ +} while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ + cache_wbinv_all(); \ +} while (0) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h new file mode 100644 index 0000000000..416b30c579 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/ckmmu.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CKMMUV1_H +#define __ASM_CSKY_CKMMUV1_H +#include + +static inline int read_mmu_index(void) +{ + return cprcr("cpcr0"); +} + +static inline void write_mmu_index(int value) +{ + cpwcr("cpcr0", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return cprcr("cpcr2") << 6; +} + +static inline int read_mmu_entrylo1(void) +{ + return cprcr("cpcr3") << 6; +} + +static inline void write_mmu_pagemask(int value) +{ + cpwcr("cpcr6", value); +} + +static inline int read_mmu_entryhi(void) +{ + return cprcr("cpcr4"); +} + +static inline void write_mmu_entryhi(int value) +{ + cpwcr("cpcr4", value); +} + +static inline unsigned long read_mmu_msa0(void) +{ + return cprcr("cpcr30"); +} + +static inline void write_mmu_msa0(unsigned long value) +{ + cpwcr("cpcr30", value); +} + +static inline unsigned long read_mmu_msa1(void) +{ + return cprcr("cpcr31"); +} + +static inline void write_mmu_msa1(unsigned long value) +{ + cpwcr("cpcr31", value); +} + +/* + * TLB operations. 
+ */ +static inline void tlb_probe(void) +{ + cpwcr("cpcr8", 0x80000000); +} + +static inline void tlb_read(void) +{ + cpwcr("cpcr8", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ + cpwcr("cpcr8", 0x04000000); +} + + +static inline void local_tlb_invalid_all(void) +{ + tlb_invalid_all(); +} + +static inline void tlb_invalid_indexed(void) +{ + cpwcr("cpcr8", 0x02000000); +} + +static inline void setup_pgd(pgd_t *pgd, int asid) +{ + cpwcr("cpcr29", __pa(pgd) | BIT(0)); + write_mmu_entryhi(asid); +} + +static inline pgd_t *get_pgd(void) +{ + return __va(cprcr("cpcr29") & ~BIT(0)); +} +#endif /* __ASM_CSKY_CKMMUV1_H */ diff --git a/arch/csky/abiv1/inc/abi/elf.h b/arch/csky/abiv1/inc/abi/elf.h new file mode 100644 index 0000000000..3058cc06b1 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/elf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->regs[9]; \ + pr_reg[2] = regs->usp; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a0; \ + pr_reg[5] = regs->a1; \ + pr_reg[6] = regs->a2; \ + pr_reg[7] = regs->a3; \ + pr_reg[8] = regs->regs[0]; \ + pr_reg[9] = regs->regs[1]; \ + pr_reg[10] = regs->regs[2]; \ + pr_reg[11] = regs->regs[3]; \ + pr_reg[12] = regs->regs[4]; \ + pr_reg[13] = regs->regs[5]; \ + pr_reg[14] = regs->regs[6]; \ + pr_reg[15] = regs->regs[7]; \ + pr_reg[16] = regs->regs[8]; \ + pr_reg[17] = regs->lr; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h new file mode 100644 index 0000000000..b6a2109b89 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include +#include + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define usp ss1 + +.macro USPTOKSP + mtcr sp, usp + mfcr sp, ss0 +.endm + +.macro KSPTOUSP + mtcr sp, ss0 + mfcr sp, usp +.endm + +.macro SAVE_ALL epc_inc + mtcr r13, ss2 + mfcr r13, epsr + btsti r13, 31 + bt 1f + USPTOKSP +1: + subi sp, 32 + subi sp, 32 + subi sp, 16 + stw r13, (sp, 12) + + stw lr, (sp, 4) + + mfcr lr, epc + movi r13, \epc_inc + add lr, r13 + stw lr, (sp, 8) + + mov lr, sp + addi lr, 32 + addi lr, 32 + addi lr, 16 + bt 2f + mfcr lr, ss1 +2: + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + mfcr r13, ss2 + stw r6, (sp) + stw r7, (sp, 4) + stw r8, (sp, 8) + stw r9, (sp, 12) + stw r10, (sp, 16) + stw r11, (sp, 20) + stw r12, (sp, 24) + stw r13, (sp, 28) + stw r14, (sp, 32) + stw r1, (sp, 36) + subi sp, 32 + subi sp, 8 +.endm + +.macro RESTORE_ALL + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + bt 1f + ldw a0, (sp, 16) + mtcr a0, ss1 +1: + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + ldw r6, (sp) + ldw r7, (sp, 4) + ldw r8, (sp, 8) + ldw r9, (sp, 12) + ldw r10, (sp, 16) + ldw r11, (sp, 20) + ldw r12, (sp, 24) + ldw r13, (sp, 28) + ldw r14, (sp, 32) + ldw r1, (sp, 36) + addi sp, 32 + addi sp, 8 + + bt 2f + KSPTOUSP +2: + rte +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 32 + stm r8-r15, (sp) +.endm + +.macro RESTORE_SWITCH_STACK + ldm r8-r15, (sp) + addi sp, 32 +.endm 
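The LSAVE_* offsets at the top of this header line up with the stores in SAVE_ALL; as a reading aid, here is the implied 80-byte exception frame written out as a C struct. The field names follow csky's pt_regs layout but are inferred here from the stw offsets above, not copied from this patch.

	/* Hedged sketch of the frame SAVE_ALL builds (abiv1). */
	struct frame_layout {
		unsigned long tls;	/* (sp, 0)  not written by abiv1 SAVE_ALL */
		unsigned long lr;	/* (sp, 4)  */
		unsigned long pc;	/* (sp, 8)  == LSAVE_PC  */
		unsigned long sr;	/* (sp, 12) == LSAVE_PSR */
		unsigned long usp;	/* (sp, 16) */
		unsigned long orig_a0;	/* (sp, 20) first a0 store */
		unsigned long a0;	/* (sp, 24) == LSAVE_A0  */
		unsigned long a1;	/* (sp, 28) == LSAVE_A1  */
		unsigned long a2;	/* (sp, 32) == LSAVE_A2  */
		unsigned long a3;	/* (sp, 36) == LSAVE_A3  */
		unsigned long regs[10];	/* (sp, 40) r6-r14, r1   */
	};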
+ +/* MMU registers operators. */ +.macro RD_MIR rx + cprcr \rx, cpcr0 +.endm + +.macro RD_MEH rx + cprcr \rx, cpcr4 +.endm + +.macro RD_MCIR rx + cprcr \rx, cpcr8 +.endm + +.macro RD_PGDR rx + cprcr \rx, cpcr29 +.endm + +.macro WR_MEH rx + cpwcr \rx, cpcr4 +.endm + +.macro WR_MCIR rx + cpwcr \rx, cpcr8 +.endm + +.macro SETUP_MMU + /* Init psr and enable ee */ + lrw r6, DEFAULT_PSR_VALUE + mtcr r6, psr + psrset ee + + /* Select MMU as co-processor */ + cpseti cp15 + + /* + * cpcr30 format: + * 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0 + * BA Reserved C D V + */ + cprcr r6, cpcr30 + lsri r6, 29 + lsli r6, 29 + addi r6, 0xe + cpwcr r6, cpcr30 + + movi r6, 0 + cpwcr r6, cpcr31 +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h new file mode 100644 index 0000000000..2d2159933b --- /dev/null +++ b/arch/csky/abiv1/inc/abi/page.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +extern void flush_dcache_page(struct page *page); + +static inline unsigned long pages_do_alias(unsigned long addr1, + unsigned long addr2) +{ + return (addr1 ^ addr2) & (SHMLBA-1); +} + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); + if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); + if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h new file mode 100644 index 0000000000..ae7a2f76dd --- /dev/null +++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_PRESENT (1<<0) +#define _PAGE_READ (1<<1) +#define _PAGE_WRITE (1<<2) +#define _PAGE_ACCESSED (1<<3) +#define _PAGE_MODIFIED (1<<4) + +/* We borrow bit 9 to store the exclusive marker in swap PTEs. */ +#define _PAGE_SWP_EXCLUSIVE (1<<9) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<6) +#define _PAGE_VALID (1<<7) +#define _PAGE_DIRTY (1<<8) + +#define _PAGE_CACHE (3<<9) +#define _PAGE_UNCACHE (2<<9) +#define _PAGE_SO _PAGE_UNCACHE +#define _CACHE_MASK (7<<9) + +#define _CACHE_CACHED _PAGE_CACHE +#define _CACHE_UNCACHED _PAGE_UNCACHE + +#define _PAGE_PROT_NONE _PAGE_READ + +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). 
+ * + * Format of swap PTE: + * bit 0: _PAGE_PRESENT (zero) + * bit 1: _PAGE_READ (zero) + * bit 2 - 5: swap type[0 - 3] + * bit 6: _PAGE_GLOBAL (zero) + * bit 7: _PAGE_VALID (zero) + * bit 8: swap type[4] + * bit 9: exclusive marker + * bit 10 - 31: swap offset + */ +#define __swp_type(x) ((((x).val >> 2) & 0xf) | \ + (((x).val >> 4) & 0x10)) +#define __swp_offset(x) ((x).val >> 10) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type & 0xf) << 2) | \ + ((type & 0x10) << 4) | \ + ((offset) << 10)}) + +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h new file mode 100644 index 0000000000..abd01a2433 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/reg_ops.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n":"=b"(tmp)); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile("cpwcr %0, "reg"\n"::"b"(val)); \ +}) + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr30"); +} + +static inline unsigned int mfcr_ccr2(void) { return 0; } + +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h new file mode 100644 index 0000000000..7b386fd670 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/regdef.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#ifdef __ASSEMBLY__ +#define syscallid r1 +#else +#define syscallid "r1" +#endif + +#define regs_syscallid(regs) regs->regs[9] +#define regs_fp(regs) regs->regs[2] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-0 | + * S CPID VEC TM + * + * S: Super Mode + * CPID: Coprocessor id, only 15 for MMU + * VEC: Exception Number + * TM: Trace Mode + */ +#define DEFAULT_PSR_VALUE 0x8f000000 + +#define SYSTRACE_SAVENUM 2 + +#define TRAP0_SIZE 2 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h new file mode 100644 index 0000000000..de50117b90 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/string.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h new file mode 100644 index 0000000000..ec73fd7c9f --- /dev/null +++ b/arch/csky/abiv1/inc/abi/switch_context.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h new file mode 100644 index 0000000000..9e6d0a2fdd --- /dev/null +++ b/arch/csky/abiv1/inc/abi/vdso.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H + +/* movi r1, 127; addi r1, (139 - 127) */ 
+#define SET_SYSCALL_ID	.long 0x20b167f1
+
+#endif /* __ABI_CSKY_VDSO_H */
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
new file mode 100644
index 0000000000..6792aca499
--- /dev/null
+++ b/arch/csky/abiv1/mmap.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define COLOUR_ALIGN(addr,pgoff)		\
+	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
+	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	struct vm_unmapped_area_info info;
+
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
+	if (flags & MAP_FIXED) {
+		if (flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)))
+			return addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile
new file mode 100644
index 0000000000..ea8005fe01
--- /dev/null
+++ b/arch/csky/abiv2/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += cacheflush.o
+obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
+obj-y += memcmp.o
+ifeq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS), y)
+obj-y += memcpy.o
+obj-y += memmove.o
+obj-y += memset.o
+endif
+obj-y += strcmp.o
+obj-y += strcpy.o
+obj-y += strlen.o
+obj-y += strksyms.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
new file mode 100644
index 0000000000..876028b108
--- /dev/null
+++ b/arch/csky/abiv2/cacheflush.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include
+#include
+#include
+#include
+#include
+
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *pte, unsigned int nr)
+{
+	unsigned long pfn = pte_pfn(*pte);
+	struct folio *folio;
+	unsigned int i;
+
+	flush_tlb_page(vma, address);
+
+	if (!pfn_valid(pfn))
+		return;
+
+	folio = page_folio(pfn_to_page(pfn));
+
+	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+		return;
+
+	icache_inv_range(address, address + nr*PAGE_SIZE);
+	for (i = 0; i < folio_nr_pages(folio); i++) {
+		unsigned long addr = (unsigned long) kmap_local_folio(folio,
+								i * PAGE_SIZE);
+
+		dcache_wb_range(addr, addr + PAGE_SIZE);
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_range(addr, addr + PAGE_SIZE);
+		kunmap_local((void *) addr);
+	}
+}
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+ */ + smp_mb(); + local_icache_inv_all(NULL); + } +} + +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + unsigned int cpu; + cpumask_t others, *mask; + + preempt_disable(); + +#ifdef CONFIG_CPU_HAS_ICACHE_INS + if (mm == current->mm) { + icache_inv_range(start, end); + preempt_enable(); + return; + } +#endif + + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); + + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_icache_inv_all(NULL); + + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. + */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + + if (mm != current->active_mm || !cpumask_empty(&others)) { + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); + cpumask_clear(mask); + } + + preempt_enable(); +} diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c new file mode 100644 index 0000000000..5acc5c2e54 --- /dev/null +++ b/arch/csky/abiv2/fpu.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include + +#define MTCR_MASK 0xFC00FFE0 +#define MFCR_MASK 0xFC00FFE0 +#define MTCR_DIST 0xC0006420 +#define MFCR_DIST 0xC0006020 + +/* + * fpu_libc_helper() is to help libc to excute: + * - mfcr %a, cr<1, 2> + * - mfcr %a, cr<2, 2> + * - mtcr %a, cr<1, 2> + * - mtcr %a, cr<2, 2> + */ +int fpu_libc_helper(struct pt_regs *regs) +{ + int fault; + unsigned long instrptr, regx = 0; + unsigned long index = 0, tmp = 0; + unsigned long tinstr = 0; + u16 instr_hi, instr_low; + + instrptr = instruction_pointer(regs); + if (instrptr & 1) + return 0; + + fault = __get_user(instr_low, (u16 *)instrptr); + if (fault) + return 0; + + fault = __get_user(instr_hi, (u16 *)(instrptr + 2)); + if (fault) + return 0; + + tinstr = instr_hi | ((unsigned long)instr_low << 16); + + if (((tinstr >> 21) & 0x1F) != 2) + return 0; + + if ((tinstr & MTCR_MASK) == MTCR_DIST) { + index = (tinstr >> 16) & 0x1F; + if (index > 13) + return 0; + + tmp = tinstr & 0x1F; + if (tmp > 2) + return 0; + + regx = *(®s->a0 + index); + + if (tmp == 1) + mtcr("cr<1, 2>", regx); + else if (tmp == 2) + mtcr("cr<2, 2>", regx); + else + return 0; + + regs->pc += 4; + return 1; + } + + if ((tinstr & MFCR_MASK) == MFCR_DIST) { + index = tinstr & 0x1F; + if (index > 13) + return 0; + + tmp = ((tinstr >> 16) & 0x1F); + if (tmp > 2) + return 0; + + if (tmp == 1) + regx = mfcr("cr<1, 2>"); + else if (tmp == 2) + regx = mfcr("cr<2, 2>"); + else + return 0; + + *(®s->a0 + index) = regx; + + regs->pc += 4; + return 1; + } + + return 0; +} + +void fpu_fpe(struct pt_regs *regs) +{ + int sig, code; + unsigned int fesr; + + fesr = mfcr("cr<2, 2>"); + + sig = SIGFPE; + code = FPE_FLTUNK; + + if (fesr & FPE_ILLE) { + sig = SIGILL; + code = ILL_ILLOPC; + } else if (fesr & FPE_IDC) { + sig = SIGILL; + code = ILL_ILLOPN; + } else if (fesr & FPE_FEC) { + sig = SIGFPE; + if (fesr & FPE_IOC) + code = FPE_FLTINV; + else if (fesr & FPE_DZC) + code = FPE_FLTDIV; + else if (fesr & FPE_UFC) + code = FPE_FLTUND; + else if (fesr & FPE_OFC) + code = FPE_FLTOVF; + else if (fesr & FPE_IXC) + code = FPE_FLTRES; + } + + force_sig_fault(sig, code, (void __user *)regs->pc); +} + +#define FMFVR_FPU_REGS(vrx, vry) \ + "fmfvrl %0, "#vrx"\n" \ + "fmfvrh %1, "#vrx"\n" \ + "fmfvrl %2, "#vry"\n" \ + "fmfvrh %3, "#vry"\n" + +#define 
FMTVR_FPU_REGS(vrx, vry) \ + "fmtvrl "#vrx", %0\n" \ + "fmtvrh "#vrx", %1\n" \ + "fmtvrl "#vry", %2\n" \ + "fmtvrh "#vry", %3\n" + +#define STW_FPU_REGS(a, b, c, d) \ + "stw %0, (%4, "#a")\n" \ + "stw %1, (%4, "#b")\n" \ + "stw %2, (%4, "#c")\n" \ + "stw %3, (%4, "#d")\n" + +#define LDW_FPU_REGS(a, b, c, d) \ + "ldw %0, (%4, "#a")\n" \ + "ldw %1, (%4, "#b")\n" \ + "ldw %2, (%4, "#c")\n" \ + "ldw %3, (%4, "#d")\n" + +void save_to_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = mfcr("cr<1, 2>"); + tmp2 = mfcr("cr<2, 2>"); + + user_fp->fcr = tmp1; + user_fp->fesr = tmp2; + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vstmu.32 vr0-vr3, (%0)\n" + "vstmu.32 vr4-vr7, (%0)\n" + "vstmu.32 vr8-vr11, (%0)\n" + "vstmu.32 vr12-vr15, (%0)\n" + "fstmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fstmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + FMFVR_FPU_REGS(vr0, vr1) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr2, vr3) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr4, vr5) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr6, vr7) + STW_FPU_REGS(96, 100, 112, 116) + "addi %4, 128\n" + FMFVR_FPU_REGS(vr8, vr9) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr10, vr11) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr12, vr13) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr14, vr15) + STW_FPU_REGS(96, 100, 112, 116) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + + local_irq_restore(flg); +} + +void restore_from_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = user_fp->fcr; + tmp2 = user_fp->fesr; + + mtcr("cr<1, 2>", tmp1); + mtcr("cr<2, 2>", tmp2); + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vldmu.32 vr0-vr3, (%0)\n" + "vldmu.32 vr4-vr7, (%0)\n" + "vldmu.32 vr8-vr11, (%0)\n" + "vldmu.32 vr12-vr15, (%0)\n" + "fldmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fldmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr0, vr1) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr2, vr3) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr4, vr5) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr6, vr7) + "addi %4, 128\n" + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr8, vr9) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr10, vr11) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr12, vr13) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr14, vr15) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + local_irq_restore(flg); +} diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h new file mode 100644 index 0000000000..40be169072 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +/* Keep includes the same across arches. 
 */
+#include
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change, since
+ * the cache is mapped to physical memory, not virtual memory.
+ */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+
+#define PG_dcache_clean		PG_arch_1
+
+static inline void flush_dcache_folio(struct folio *folio)
+{
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
+
+void flush_icache_mm_range(struct mm_struct *mm,
+		unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	memcpy(dst, src, len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+	} \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
new file mode 100644
index 0000000000..64215f2380
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/ckmmu.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CKMMUV2_H
+#define __ASM_CSKY_CKMMUV2_H
+
+#include
+#include
+
+static inline int read_mmu_index(void)
+{
+	return mfcr("cr<0, 15>");
+}
+
+static inline void write_mmu_index(int value)
+{
+	mtcr("cr<0, 15>", value);
+}
+
+static inline int read_mmu_entrylo0(void)
+{
+	return mfcr("cr<2, 15>");
+}
+
+static inline int read_mmu_entrylo1(void)
+{
+	return mfcr("cr<3, 15>");
+}
+
+static inline void write_mmu_pagemask(int value)
+{
+	mtcr("cr<6, 15>", value);
+}
+
+static inline int read_mmu_entryhi(void)
+{
+	return mfcr("cr<4, 15>");
+}
+
+static inline void write_mmu_entryhi(int value)
+{
+	mtcr("cr<4, 15>", value);
+}
+
+static inline unsigned long read_mmu_msa0(void)
+{
+	return mfcr("cr<30, 15>");
+}
+
+static inline void write_mmu_msa0(unsigned long value)
+{
+	mtcr("cr<30, 15>", value);
+}
+
+static inline unsigned long read_mmu_msa1(void)
+{
+	return mfcr("cr<31, 15>");
+}
+
+static inline void write_mmu_msa1(unsigned long value)
+{
+	mtcr("cr<31, 15>", value);
+}
+
+/*
+ * TLB operations.
+ */ +static inline void tlb_probe(void) +{ + mtcr("cr<8, 15>", 0x80000000); +} + +static inline void tlb_read(void) +{ + mtcr("cr<8, 15>", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.alls \n" + "sync.i \n" + : + : + : "memory"); +#else + mtcr("cr<8, 15>", 0x04000000); +#endif +} + +static inline void local_tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.all \n" + "sync.i \n" + : + : + : "memory"); +#else + tlb_invalid_all(); +#endif +} + +static inline void tlb_invalid_indexed(void) +{ + mtcr("cr<8, 15>", 0x02000000); +} + +#define NOP32 ".long 0x4820c400\n" + +static inline void setup_pgd(pgd_t *pgd, int asid) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); +#else + mb(); +#endif + asm volatile( +#ifdef CONFIG_CPU_HAS_TLBI + "mtcr %1, cr<28, 15> \n" +#endif + "mtcr %1, cr<29, 15> \n" + "mtcr %0, cr< 4, 15> \n" + ".rept 64 \n" + NOP32 + ".endr \n" + : + :"r"(asid), "r"(__pa(pgd) | BIT(0)) + :"memory"); +} + +static inline pgd_t *get_pgd(void) +{ + return __va(mfcr("cr<29, 15>") & ~BIT(0)); +} +#endif /* __ASM_CSKY_CKMMUV2_H */ diff --git a/arch/csky/abiv2/inc/abi/elf.h b/arch/csky/abiv2/inc/abi/elf.h new file mode 100644 index 0000000000..290f49ef4c --- /dev/null +++ b/arch/csky/abiv2/inc/abi/elf.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +/* The member sort in array pr_reg[x] is defined by GDB. */ +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->a1; \ + pr_reg[2] = regs->a0; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a2; \ + pr_reg[5] = regs->a3; \ + pr_reg[6] = regs->regs[0]; \ + pr_reg[7] = regs->regs[1]; \ + pr_reg[8] = regs->regs[2]; \ + pr_reg[9] = regs->regs[3]; \ + pr_reg[10] = regs->regs[4]; \ + pr_reg[11] = regs->regs[5]; \ + pr_reg[12] = regs->regs[6]; \ + pr_reg[13] = regs->regs[7]; \ + pr_reg[14] = regs->regs[8]; \ + pr_reg[15] = regs->regs[9]; \ + pr_reg[16] = regs->usp; \ + pr_reg[17] = regs->lr; \ + pr_reg[18] = regs->exregs[0]; \ + pr_reg[19] = regs->exregs[1]; \ + pr_reg[20] = regs->exregs[2]; \ + pr_reg[21] = regs->exregs[3]; \ + pr_reg[22] = regs->exregs[4]; \ + pr_reg[23] = regs->exregs[5]; \ + pr_reg[24] = regs->exregs[6]; \ + pr_reg[25] = regs->exregs[7]; \ + pr_reg[26] = regs->exregs[8]; \ + pr_reg[27] = regs->exregs[9]; \ + pr_reg[28] = regs->exregs[10]; \ + pr_reg[29] = regs->exregs[11]; \ + pr_reg[30] = regs->exregs[12]; \ + pr_reg[31] = regs->exregs[13]; \ + pr_reg[32] = regs->exregs[14]; \ + pr_reg[33] = regs->tls; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h new file mode 100644 index 0000000000..cca63e699b --- /dev/null +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include +#include + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define KSPTOUSP +#define USPTOKSP + +#define usp cr<14, 1> + +.macro SAVE_ALL epc_inc + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + RD_MEH lr + WR_MEH lr + + mfcr lr, epc + movi tls, \epc_inc + add lr, tls + stw lr, (sp, 8) + + mfcr lr, epsr + stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: + mfcr lr, usp +2: + stw lr, (sp, 16) + + stw a0, (sp, 
20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_ALL + ldw tls, (sp, 0) + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + ldw a0, (sp, 16) + mtcr a0, usp + mtcr a0, ss0 + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 + bf 1f + mfcr sp, ss0 +1: + rte +.endm + +.macro SAVE_REGS_FTRACE + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + mfcr lr, psr + stw lr, (sp, 12) + + addi lr, sp, 152 + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_REGS_FTRACE + ldw tls, (sp, 0) + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 64 + stm r4-r11, (sp) + stw lr, (sp, 32) + stw r16, (sp, 36) + stw r17, (sp, 40) + stw r26, (sp, 44) + stw r27, (sp, 48) + stw r28, (sp, 52) + stw r29, (sp, 56) + stw r30, (sp, 60) +#ifdef CONFIG_CPU_HAS_HILO + subi sp, 16 + mfhi lr + stw lr, (sp, 0) + mflo lr + stw lr, (sp, 4) + mfcr lr, cr14 + stw lr, (sp, 8) +#endif +.endm + +.macro RESTORE_SWITCH_STACK +#ifdef CONFIG_CPU_HAS_HILO + ldw lr, (sp, 0) + mthi lr + ldw lr, (sp, 4) + mtlo lr + ldw lr, (sp, 8) + mtcr lr, cr14 + addi sp, 16 +#endif + ldm r4-r11, (sp) + ldw lr, (sp, 32) + ldw r16, (sp, 36) + ldw r17, (sp, 40) + ldw r26, (sp, 44) + ldw r27, (sp, 48) + ldw r28, (sp, 52) + ldw r29, (sp, 56) + ldw r30, (sp, 60) + addi sp, 64 +.endm + +/* MMU registers operators. 
*/ +.macro RD_MIR rx + mfcr \rx, cr<0, 15> +.endm + +.macro RD_MEH rx + mfcr \rx, cr<4, 15> +.endm + +.macro RD_MCIR rx + mfcr \rx, cr<8, 15> +.endm + +.macro RD_PGDR rx + mfcr \rx, cr<29, 15> +.endm + +.macro RD_PGDR_K rx + mfcr \rx, cr<28, 15> +.endm + +.macro WR_MEH rx + mtcr \rx, cr<4, 15> +.endm + +.macro WR_MCIR rx + mtcr \rx, cr<8, 15> +.endm + +#ifdef CONFIG_PAGE_OFFSET_80000000 +#define MSA_SET cr<30, 15> +#define MSA_CLR cr<31, 15> +#endif + +#ifdef CONFIG_PAGE_OFFSET_A0000000 +#define MSA_SET cr<31, 15> +#define MSA_CLR cr<30, 15> +#endif + +.macro SETUP_MMU + /* Init psr and enable ee */ + lrw r6, DEFAULT_PSR_VALUE + mtcr r6, psr + psrset ee + + /* Invalid I/Dcache BTB BHT */ + movi r6, 7 + lsli r6, 16 + addi r6, (1<<4) | 3 + mtcr r6, cr17 + + /* Invalid all TLB */ + bgeni r6, 26 + mtcr r6, cr<8, 15> /* Set MCIR */ + + /* Check MMU on/off */ + mfcr r6, cr18 + btsti r6, 0 + bt 1f + + /* MMU off: setup mapping tlb entry */ + movi r6, 0 + mtcr r6, cr<6, 15> /* Set MPR with 4K page size */ + + grs r6, 1f /* Get current pa by PC */ + bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */ + andn r6, r7 + mtcr r6, cr<4, 15> /* Set MEH */ + + mov r8, r6 + movi r7, 0x00000006 + or r8, r7 + mtcr r8, cr<2, 15> /* Set MEL0 */ + movi r7, 0x00001006 + or r8, r7 + mtcr r8, cr<3, 15> /* Set MEL1 */ + + bgeni r8, 28 + mtcr r8, cr<8, 15> /* Set MCIR to write TLB */ + + br 2f +1: + /* + * MMU on: use origin MSA value from bootloader + * + * cr<30/31, 15> MSA register format: + * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 + * BA Reserved SH WA B SO SEC C D V + */ + mfcr r6, MSA_SET /* Get MSA */ +2: + lsri r6, 29 + lsli r6, 29 + addi r6, 0x1ce + mtcr r6, MSA_SET /* Set MSA */ + + movi r6, 0 + mtcr r6, MSA_CLR /* Clr MSA */ + + /* enable MMU */ + mfcr r6, cr18 + bseti r6, 0 + mtcr r6, cr18 + + jmpi 3f /* jump to va */ +3: +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h new file mode 100644 index 0000000000..aabb793550 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FPU_H +#define __ASM_CSKY_FPU_H + +#include +#include + +int fpu_libc_helper(struct pt_regs *regs); +void fpu_fpe(struct pt_regs *regs); + +static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } + +void save_to_user_fp(struct user_fp *user_fp); +void restore_from_user_fp(struct user_fp *user_fp); + +/* + * Define the fesr bit for fpe handle. 
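The MSA fixup at the end of SETUP_MMU above ("lsri r6, 29; lsli r6, 29; addi r6, 0x1ce") keeps only the BA field and ORs in a fixed attribute pattern. A minimal sketch of that computation follows, using the bit names from the register-format comment; the flag macros and their expansions (shareable, write-allocate, and so on) are the conventional readings and are local to the example, not kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define MSA_SH	(1u << 8)	/* "SH", conventionally shareable */
#define MSA_WA	(1u << 7)	/* "WA", conventionally write-allocate */
#define MSA_B	(1u << 6)	/* "B" */
#define MSA_C	(1u << 3)	/* "C" */
#define MSA_D	(1u << 2)	/* "D" */
#define MSA_V	(1u << 1)	/* "V" */

/* Equivalent of "lsri r6, 29; lsli r6, 29; addi r6, 0x1ce". */
static uint32_t msa_value(uint32_t old)
{
	uint32_t ba = old & (0x7u << 29);	/* keep BA, bits 31..29 */

	return ba | MSA_SH | MSA_WA | MSA_B | MSA_C | MSA_D | MSA_V;
}

int main(void)
{
	/* 0x1ce == SH | WA | B | C | D | V */
	printf("0x%08x\n", msa_value(0x80000000u));	/* 0x800001ce */
	return 0;
}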
+ */ +#define FPE_ILLE (1 << 16) /* Illegal instruction */ +#define FPE_FEC (1 << 7) /* Input float-point arithmetic exception */ +#define FPE_IDC (1 << 5) /* Input denormalized exception */ +#define FPE_IXC (1 << 4) /* Inexact exception */ +#define FPE_UFC (1 << 3) /* Underflow exception */ +#define FPE_OFC (1 << 2) /* Overflow exception */ +#define FPE_DZC (1 << 1) /* Divide by zero exception */ +#define FPE_IOC (1 << 0) /* Invalid operation exception */ +#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC) + +#ifdef CONFIG_OPEN_FPU_IDE +#define IDE_STAT (1 << 5) +#else +#define IDE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IXE +#define IXE_STAT (1 << 4) +#else +#define IXE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_UFE +#define UFE_STAT (1 << 3) +#else +#define UFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_OFE +#define OFE_STAT (1 << 2) +#else +#define OFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_DZE +#define DZE_STAT (1 << 1) +#else +#define DZE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IOE +#define IOE_STAT (1 << 0) +#else +#define IOE_STAT 0 +#endif + +#endif /* __ASM_CSKY_FPU_H */ diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h new file mode 100644 index 0000000000..cf005f13cd --- /dev/null +++ b/arch/csky/abiv2/inc/abi/page.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); +} diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h new file mode 100644 index 0000000000..526152bd21 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<7) +#define _PAGE_READ (1<<8) +#define _PAGE_WRITE (1<<9) +#define _PAGE_PRESENT (1<<10) +#define _PAGE_MODIFIED (1<<11) + +/* We borrow bit 7 to store the exclusive marker in swap PTEs. */ +#define _PAGE_SWP_EXCLUSIVE (1<<7) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<0) +#define _PAGE_VALID (1<<1) +#define _PAGE_DIRTY (1<<2) + +#define _PAGE_SO (1<<5) +#define _PAGE_BUF (1<<6) +#define _PAGE_CACHE (1<<3) +#define _CACHE_MASK _PAGE_CACHE + +#define _CACHE_CACHED (_PAGE_CACHE | _PAGE_BUF) +#define _CACHE_UNCACHED (0) + +#define _PAGE_PROT_NONE _PAGE_WRITE + +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). 
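Tying the FPE_* fesr bits defined above back to fpu_fpe() in fpu.c: the fesr-to-signal mapping can be exercised standalone. This sketch takes a raw fesr value as a parameter instead of reading cr<2, 2>, and reports the signal/si_code pair as a string; the priority order matches the kernel function.

#include <stdint.h>
#include <stdio.h>

#define FPE_ILLE (1 << 16)
#define FPE_FEC  (1 << 7)
#define FPE_IDC  (1 << 5)
#define FPE_IXC  (1 << 4)
#define FPE_UFC  (1 << 3)
#define FPE_OFC  (1 << 2)
#define FPE_DZC  (1 << 1)
#define FPE_IOC  (1 << 0)

/* Same priority as fpu_fpe(): ILLE, then IDC, then the FEC sub-codes. */
static const char *fesr_to_code(uint32_t fesr)
{
	if (fesr & FPE_ILLE)
		return "SIGILL/ILL_ILLOPC";
	if (fesr & FPE_IDC)
		return "SIGILL/ILL_ILLOPN";
	if (fesr & FPE_FEC) {
		if (fesr & FPE_IOC) return "SIGFPE/FPE_FLTINV";
		if (fesr & FPE_DZC) return "SIGFPE/FPE_FLTDIV";
		if (fesr & FPE_UFC) return "SIGFPE/FPE_FLTUND";
		if (fesr & FPE_OFC) return "SIGFPE/FPE_FLTOVF";
		if (fesr & FPE_IXC) return "SIGFPE/FPE_FLTRES";
	}
	return "SIGFPE/FPE_FLTUNK";
}

int main(void)
{
	puts(fesr_to_code(FPE_FEC | FPE_DZC));	/* divide by zero */
	return 0;
}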
+ * + * Format of swap PTE: + * bit 0: _PAGE_GLOBAL (zero) + * bit 1: _PAGE_VALID (zero) + * bit 2 - 6: swap type + * bit 7: exclusive marker + * bit 8: swap offset[0] + * bit 9: _PAGE_WRITE (zero) + * bit 10: _PAGE_PRESENT (zero) + * bit 11 - 31: swap offset[1 - 21] + */ +#define __swp_type(x) (((x).val >> 2) & 0x1f) +#define __swp_offset(x) ((((x).val >> 8) & 0x1) | \ + (((x).val >> 10) & 0x3ffffe)) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type & 0x1f) << 2) | \ + ((offset & 0x1) << 8) | \ + ((offset & 0x3ffffe) << 10)}) + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h new file mode 100644 index 0000000000..49ba18a647 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/reg_ops.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr31"); +} + +static inline unsigned int mfcr_ccr2(void) +{ + return mfcr("cr23"); +} +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h new file mode 100644 index 0000000000..0933addbc2 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#ifdef __ASSEMBLY__ +#define syscallid r7 +#else +#define syscallid "r7" +#endif + +#define regs_syscallid(regs) regs->regs[3] +#define regs_fp(regs) regs->regs[4] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 | + * S VEC TM MM + * + * S: Super Mode + * VEC: Exception Number + * TM: Trace Mode + * MM: Memory unaligned addr access + */ +#define DEFAULT_PSR_VALUE 0x80000200 + +#define SYSTRACE_SAVENUM 5 + +#define TRAP0_SIZE 4 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv2/inc/abi/string.h b/arch/csky/abiv2/inc/abi/string.h new file mode 100644 index 0000000000..f01bad2ac4 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/string.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *, const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *, const char *); + +#define __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h new file mode 100644 index 0000000000..5dd5c3f4ee --- /dev/null +++ b/arch/csky/abiv2/inc/abi/switch_context.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { +#ifdef CONFIG_CPU_HAS_HILO + unsigned long rhi; + unsigned long rlo; + unsigned long cr14; + unsigned long pad; +#endif + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r15; + unsigned long r16; + unsigned long r17; + unsigned long r26; + unsigned long r27; + unsigned long 
r28; + unsigned long r29; + unsigned long r30; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h new file mode 100644 index 0000000000..40fd10d893 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/vdso.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H + +/* movi r7, 173 */ +#define SET_SYSCALL_ID .long 0x008bea07 + +#endif /* __ABI_CSKY_VDSO_H */ diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S new file mode 100644 index 0000000000..d745e10c10 --- /dev/null +++ b/arch/csky/abiv2/mcount.S @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include + +/* + * csky-gcc with -pg will put the following asm after prologue: + * push r15 + * jsri _mcount + * + * stack layout after mcount_enter in _mcount(): + * + * current sp => 0:+-------+ + * | a0-a3 | -> must save all argument regs + * +16:+-------+ + * | lr | -> _mcount lr (instrumente function's pc) + * +20:+-------+ + * | fp=r8 | -> instrumented function fp + * +24:+-------+ + * | plr | -> instrumented function lr (parent's pc) + * +-------+ + */ + +.macro mcount_enter + subi sp, 24 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) + stw lr, (sp, 16) + stw r8, (sp, 20) +.endm + +.macro mcount_exit + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + ldw t1, (sp, 16) + ldw r8, (sp, 20) + ldw lr, (sp, 24) + addi sp, 28 + jmp t1 +.endm + +.macro mcount_enter_regs + subi sp, 8 + stw lr, (sp, 0) + stw r8, (sp, 4) + SAVE_REGS_FTRACE +.endm + +.macro mcount_exit_regs + RESTORE_REGS_FTRACE + subi sp, 152 + ldw t1, (sp, 4) + addi sp, 152 + ldw r8, (sp, 4) + ldw lr, (sp, 8) + addi sp, 12 + jmp t1 +.endm + +.macro save_return_regs + subi sp, 16 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) +.endm + +.macro restore_return_regs + mov lr, a0 + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + addi sp, 16 +.endm + +.macro nop32_stub + nop32 + nop32 + nop32 +.endm + +ENTRY(ftrace_stub) + jmp lr +END(ftrace_stub) + +#ifndef CONFIG_DYNAMIC_FTRACE +ENTRY(_mcount) + mcount_enter + + /* r26 is link register, only used with jsri translation */ + lrw r26, ftrace_trace_function + ldw r26, (r26, 0) + lrw a1, ftrace_stub + cmpne r26, a1 + bf skip_ftrace + + mov a0, lr + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + jsr r26 + +#ifndef CONFIG_FUNCTION_GRAPH_TRACER +skip_ftrace: + mcount_exit +#else +skip_ftrace: + lrw a0, ftrace_graph_return + ldw a0, (a0, 0) + lrw a1, ftrace_stub + cmpne a0, a1 + bt ftrace_graph_caller + + lrw a0, ftrace_graph_entry + ldw a0, (a0, 0) + lrw a1, ftrace_graph_entry_stub + cmpne a0, a1 + bt ftrace_graph_caller + + mcount_exit +#endif +END(_mcount) +#else /* CONFIG_DYNAMIC_FTRACE */ +ENTRY(_mcount) + mov t1, lr + ldw lr, (sp, 0) + addi sp, 4 + jmp t1 +ENDPROC(_mcount) + +ENTRY(ftrace_caller) + mcount_enter + + ldw a0, (sp, 16) + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + nop +GLOBAL(ftrace_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_call) + nop32_stub +#endif + + mcount_exit +ENDPROC(ftrace_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov a0, sp + addi a0, 24 + ldw a1, (sp, 16) + subi a1, 4 + mov a2, r8 + lrw r26, prepare_ftrace_return 
+ jsr r26 + mcount_exit +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save_return_regs + mov a0, r8 + jsri ftrace_return_to_handler + restore_return_regs + jmp lr +END(return_to_handler) +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +ENTRY(ftrace_regs_caller) + mcount_enter_regs + + lrw t1, PT_FRAME_SIZE + add t1, sp + + ldw a0, (t1, 0) + subi a0, 4 + ldw a1, (t1, 8) + lrw a2, function_trace_op + ldw a2, (a2, 0) + mov a3, sp + + nop +GLOBAL(ftrace_regs_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_regs_call) + nop32_stub +#endif + + mcount_exit_regs +ENDPROC(ftrace_regs_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/csky/abiv2/memcmp.S b/arch/csky/abiv2/memcmp.S new file mode 100644 index 0000000000..bf0d809f09 --- /dev/null +++ b/arch/csky/abiv2/memcmp.S @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(memcmp) + /* Test if len less than 4 bytes. */ + mov r3, r0 + movi r0, 0 + mov r12, r4 + cmplti r2, 4 + bt .L_compare_by_byte + + andi r13, r0, 3 + movi r19, 4 + + /* Test if s1 is not 4 bytes aligned. */ + bnez r13, .L_s1_not_aligned + + LABLE_ALIGN +.L_s1_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_compare_by_word + +.L_compare_by_4word: + /* If aligned, load word each time. */ + ldw r20, (r3, 0) + ldw r21, (r1, 0) + /* If s1[i] != s2[i], goto .L_byte_check. */ + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 4) + ldw r21, (r1, 4) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 8) + ldw r21, (r1, 8) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 12) + ldw r21, (r1, 12) + cmpne r20, r21 + bt .L_byte_check + + PRE_BNEZAD (r18) + addi a3, 16 + addi a1, 16 + + BNEZAD (r18, .L_compare_by_4word) + +.L_compare_by_word: + zext r18, r2, 3, 2 + bez r18, .L_compare_by_byte +.L_compare_by_word_loop: + ldw r20, (r3, 0) + ldw r21, (r1, 0) + addi r3, 4 + PRE_BNEZAD (r18) + cmpne r20, r21 + addi r1, 4 + bt .L_byte_check + BNEZAD (r18, .L_compare_by_word_loop) + +.L_compare_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_compare_by_byte_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r18) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r18, .L_compare_by_byte_loop) + +.L_return: + mov r4, r12 + rts + +# ifdef __CSKYBE__ +/* d[i] != s[i] in word, so we check byte 0. */ +.L_byte_check: + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 3 */ + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 +# else +/* s1[i] != s2[i] in word, so we check byte 3. */ +.L_byte_check: + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 0 */ + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + br .L_return +# endif /* !__CSKYBE__ */ + +/* Compare when s1 is not aligned. 
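The word-at-a-time structure of memcmp above (compare aligned words, drop to byte inspection only once two words differ) corresponds to this C sketch. Bytes are compared in address order, which is what the xtrb3..xtrb0 and xtrb0..xtrb3 sequences implement for the two endiannesses; memcpy() stands in for the aligned word loads.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int memcmp_wordwise(const void *s1, const void *s2, size_t n)
{
	const unsigned char *pa = s1, *pb = s2;

	while (n >= 4) {		/* .L_compare_by_4word */
		uint32_t wa, wb;

		memcpy(&wa, pa, 4);
		memcpy(&wb, pb, 4);
		if (wa != wb) {		/* .L_byte_check */
			for (int i = 0; ; i++)
				if (pa[i] != pb[i])
					return pa[i] - pb[i];
		}
		pa += 4, pb += 4, n -= 4;
	}
	while (n--) {			/* .L_compare_by_byte */
		if (*pa != *pb)
			return *pa - *pb;
		pa++, pb++;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", memcmp_wordwise("abcdefgh", "abcdefgx", 8) < 0); /* 1 */
	return 0;
}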
*/ +.L_s1_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_s1_not_aligned_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r13) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r13, .L_s1_not_aligned_loop) + br .L_s1_aligned +ENDPROC(memcmp) diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S new file mode 100644 index 0000000000..145bf3a936 --- /dev/null +++ b/arch/csky/abiv2/memcpy.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(__memcpy) +ENTRY(memcpy) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + +/* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + movi r19, 0 + + LABLE_ALIGN +.L_len_larger_16bytes: +#if defined(__CK860__) + ldw r3, (r1, 0) + stw r3, (r0, 0) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 12) + addi r1, 16 + stw r3, (r0, 12) + addi r0, 16 +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) + addi r1, 16 + addi r0, 16 +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 4 + stw r3, (r0, 0) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + +/* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + +/* + * If dest is not aligned, just copying some bytes makes the + * dest align. + */ +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 + +/* Makes the dest align. */ +.L_dest_not_aligned_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(__memcpy) diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S new file mode 100644 index 0000000000..5721e73ad3 --- /dev/null +++ b/arch/csky/abiv2/memmove.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + + .weak memmove +ENTRY(__memmove) +ENTRY(memmove) + subu r3, r0, r1 + cmphs r3, r2 + bt memcpy + + mov r12, r0 + addu r0, r0, r2 + addu r1, r1, r2 + + /* Test if len less than 4 bytes. */ + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. 
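memcpy.S above follows a common shape: byte-copy until the destination is word-aligned, stream 16 bytes per iteration, then finish with word and byte tails. A C rendering of that shape, with memcpy() standing in for the word transfers (the hardware tolerates the unaligned source accesses that the kernel loop issues directly):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void *memcpy_shaped(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t head = (uintptr_t)d & 3;

	if (n >= 4 && head) {		/* .L_dest_not_aligned */
		head = 4 - head;
		n -= head;
		while (head--)
			*d++ = *s++;
	}
	while (n >= 16) {		/* .L_len_larger_16bytes */
		memcpy(d, s, 16);
		d += 16, s += 16, n -= 16;
	}
	while (n >= 4) {		/* .L_len_less_16bytes */
		memcpy(d, s, 4);
		d += 4, s += 4, n -= 4;
	}
	while (n--)			/* .L_copy_by_byte */
		*d++ = *s++;
	return dst;
}

int main(void)
{
	char buf[32];

	memcpy_shaped(buf, "0123456789abcdefghijklmnopqrstu", 32);
	return buf[31] != '\0';
}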
*/ + bez r18, .L_len_less_16bytes + movi r19, 0 + + /* len > 16 bytes */ + LABLE_ALIGN +.L_len_larger_16bytes: + subi r1, 16 + subi r0, 16 +#if defined(__CK860__) + ldw r3, (r1, 12) + stw r3, (r0, 12) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 0) + stw r3, (r0, 0) +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + subi r1, 4 + subi r0, 4 + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + stw r3, (r0, 0) + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + subi r1, 1 + subi r0, 1 + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + stb r3, (r0, 0) + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just copy some bytes makes the dest + align. */ +.L_dest_not_aligned: + sub r2, r13 +.L_dest_not_aligned_loop: + subi r1, 1 + subi r0, 1 + /* Makes the dest align. */ + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + stb r3, (r0, 0) + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/csky/abiv2/memset.S b/arch/csky/abiv2/memset.S new file mode 100644 index 0000000000..a7e7d994b6 --- /dev/null +++ b/arch/csky/abiv2/memset.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + + .weak memset +ENTRY(__memset) +ENTRY(memset) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 8 + bt .L_set_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + zextb r3, r1 + lsli r1, 8 + or r1, r3 + lsli r3, r1, 16 + or r3, r1 + + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + + LABLE_ALIGN +.L_len_larger_16bytes: + stw r3, (r0, 0) + stw r3, (r0, 4) + stw r3, (r0, 8) + stw r3, (r0, 12) + PRE_BNEZAD (r18) + addi r0, 16 + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + andi r2, 3 + bez r18, .L_set_by_byte +.L_len_less_16bytes_loop: + stw r3, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_set_by_byte: + zext r18, r2, 2, 0 + bez r18, .L_return +.L_set_by_byte_loop: + stb r1, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 1 + BNEZAD (r18, .L_set_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just set some bytes makes the dest + align. */ + +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_dest_not_aligned_loop: + /* Makes the dest align. */ + stb r1, (r0, 0) + PRE_BNEZAD (r13) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 8 + bt .L_set_by_byte + /* Check whether the src is aligned. 
*/ + jbr .L_dest_aligned +ENDPROC(memset) +ENDPROC(__memset) diff --git a/arch/csky/abiv2/strcmp.S b/arch/csky/abiv2/strcmp.S new file mode 100644 index 0000000000..f8403f4d8c --- /dev/null +++ b/arch/csky/abiv2/strcmp.S @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strcmp) + mov a3, a0 + /* Check if the s1 addr is aligned. */ + xor a2, a3, a1 + andi a2, 0x3 + bnez a2, 7f + andi t1, a0, 0x3 + bnez t1, 5f + +1: + /* If aligned, load word each time. */ + ldw t0, (a3, 0) + ldw t1, (a1, 0) + /* If s1[i] != s2[i], goto 2f. */ + cmpne t0, t1 + bt 2f + /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ + tstnbz t0 + /* If at the end, goto 3f (finish comparing). */ + bf 3f + + ldw t0, (a3, 4) + ldw t1, (a1, 4) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 8) + ldw t1, (a1, 8) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 12) + ldw t1, (a1, 12) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 16) + ldw t1, (a1, 16) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 20) + ldw t1, (a1, 20) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 24) + ldw t1, (a1, 24) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 28) + ldw t1, (a1, 28) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + addi a3, 32 + addi a1, 32 + + br 1b + +# ifdef __CSKYBE__ + /* d[i] != s[i] in word, so we check byte 0. */ +2: + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 3 */ + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 +# else + /* s1[i] != s2[i] in word, so we check byte 3. */ +2: + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 0 */ + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + +# endif /* !__CSKYBE__ */ + jmp lr +3: + movi a0, 0 +4: + jmp lr + + /* Compare when s1 or s2 is not aligned. */ +5: + subi t1, 4 +6: + ldb a0, (a3, 0) + ldb a2, (a1, 0) + subu a0, a2 + bez a2, 4b + bnez a0, 4b + addi t1, 1 + addi a1, 1 + addi a3, 1 + bnez t1, 6b + br 1b + +7: + ldb a0, (a3, 0) + addi a3, 1 + ldb a2, (a1, 0) + addi a1, 1 + subu a0, a2 + bnez a0, 4b + bnez a2, 7b + jmp r15 +ENDPROC(strcmp) diff --git a/arch/csky/abiv2/strcpy.S b/arch/csky/abiv2/strcpy.S new file mode 100644 index 0000000000..3c6d3f6a57 --- /dev/null +++ b/arch/csky/abiv2/strcpy.S @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strcpy) + mov a3, a0 + /* Check if the src addr is aligned. */ + andi t0, a1, 3 + bnez t0, 11f +1: + /* Check if all the bytes in the word are not zero. 
*/ + ldw a2, (a1) + tstnbz a2 + bf 9f + stw a2, (a3) + + ldw a2, (a1, 4) + tstnbz a2 + bf 2f + stw a2, (a3, 4) + + ldw a2, (a1, 8) + tstnbz a2 + bf 3f + stw a2, (a3, 8) + + ldw a2, (a1, 12) + tstnbz a2 + bf 4f + stw a2, (a3, 12) + + ldw a2, (a1, 16) + tstnbz a2 + bf 5f + stw a2, (a3, 16) + + ldw a2, (a1, 20) + tstnbz a2 + bf 6f + stw a2, (a3, 20) + + ldw a2, (a1, 24) + tstnbz a2 + bf 7f + stw a2, (a3, 24) + + ldw a2, (a1, 28) + tstnbz a2 + bf 8f + stw a2, (a3, 28) + + addi a3, 32 + addi a1, 32 + br 1b + + +2: + addi a3, 4 + br 9f + +3: + addi a3, 8 + br 9f + +4: + addi a3, 12 + br 9f + +5: + addi a3, 16 + br 9f + +6: + addi a3, 20 + br 9f + +7: + addi a3, 24 + br 9f + +8: + addi a3, 28 +9: +# ifdef __CSKYBE__ + xtrb0 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# else + xtrb3 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# endif /* !__CSKYBE__ */ +10: + jmp lr + +11: + subi t0, 4 +12: + ld.b a2, (a1) + st.b a2, (a3) + bez a2, 10b + addi t0, 1 + addi a1, a1, 1 + addi a3, a3, 1 + bnez t0, 12b + jbr 1b +ENDPROC(strcpy) diff --git a/arch/csky/abiv2/strksyms.c b/arch/csky/abiv2/strksyms.c new file mode 100644 index 0000000000..8d1fd28c6c --- /dev/null +++ b/arch/csky/abiv2/strksyms.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memmove); +#endif +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); diff --git a/arch/csky/abiv2/strlen.S b/arch/csky/abiv2/strlen.S new file mode 100644 index 0000000000..bcdd70764d --- /dev/null +++ b/arch/csky/abiv2/strlen.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strlen) + /* Check if the start addr is aligned. */ + mov r3, r0 + andi r1, r0, 3 + movi r2, 4 + movi r0, 0 + bnez r1, .L_start_not_aligned + + LABLE_ALIGN +.L_start_addr_aligned: + /* Check if all the bytes in the word are not zero. 
*/ + ldw r1, (r3) + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 4) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 8) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 12) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 16) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 20) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 24) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 28) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + addi r0, 4 + addi r3, 32 + br .L_start_addr_aligned + +.L_string_tail: +# ifdef __CSKYBE__ + xtrb0 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 +# else + xtrb3 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 +# endif /* !__CSKYBE__ */ + +.L_return: + rts + +.L_start_not_aligned: + sub r2, r2, r1 +.L_start_not_aligned_loop: + ldb r1, (r3) + PRE_BNEZAD (r2) + addi r3, 1 + bez r1, .L_return + addi r0, 1 + BNEZAD (r2, .L_start_not_aligned_loop) + br .L_start_addr_aligned +ENDPROC(strlen) diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h new file mode 100644 index 0000000000..61abe9201c --- /dev/null +++ b/arch/csky/abiv2/sysdep.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __SYSDEP_H +#define __SYSDEP_H + +#ifdef __ASSEMBLER__ + +#if defined(__CK860__) +#define LABLE_ALIGN \ + .balignw 16, 0x6c03 + +#define PRE_BNEZAD(R) + +#define BNEZAD(R, L) \ + bnezad R, L +#else +#define LABLE_ALIGN \ + .balignw 8, 0x6c03 + +#define PRE_BNEZAD(R) \ + subi R, 1 + +#define BNEZAD(R, L) \ + bnez R, L +#endif + +#endif + +#endif diff --git a/arch/csky/boot/Makefile b/arch/csky/boot/Makefile new file mode 100644 index 0000000000..c3cfde28f8 --- /dev/null +++ b/arch/csky/boot/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only +targets := Image zImage uImage + +$(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +compress-$(CONFIG_KERNEL_GZIP) = gzip +compress-$(CONFIG_KERNEL_LZO) = lzo +compress-$(CONFIG_KERNEL_LZMA) = lzma +compress-$(CONFIG_KERNEL_XZ) = xzkern +compress-$(CONFIG_KERNEL_LZ4) = lz4 + +$(obj)/zImage: $(obj)/Image FORCE + $(call if_changed,$(compress-y)) + @echo ' Kernel: $@ is ready' + +UIMAGE_ARCH = sandbox +UIMAGE_COMPRESSION = $(compress-y) +UIMAGE_LOADADDR = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}') + +$(obj)/uImage: $(obj)/zImage + $(call if_changed,uimage) + @echo 'Image: $@ is ready' diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile new file mode 100644 index 0000000000..5f1f55e911 --- /dev/null +++ b/arch/csky/boot/dts/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +dtstree := $(srctree)/$(src) + +dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig new file mode 100644 index 0000000000..af722e4dfb --- /dev/null +++ b/arch/csky/configs/defconfig @@ -0,0 +1,53 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="csky" +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y 
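The tstnbz instruction used by strlen.S above answers "does this word contain a zero byte?" in one step. Without it, the same scan is usually written with the classic borrow bit trick, as in this sketch. Like the assembly, it may read a few bytes past the terminator within an aligned word, which is safe on real hardware but formally out of bounds in strict C; the aligned 8-byte buffer keeps the example itself well-defined.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Assumes s is 4-byte aligned, as the assembly's prologue guarantees. */
static size_t strlen_wordwise(const char *s)
{
	const char *p = s;
	uint32_t w;

	for (;; p += 4) {
		memcpy(&w, p, 4);
		/* Nonzero iff some byte of w is zero: stand-in for tstnbz. */
		if ((w - 0x01010101u) & ~w & 0x80808080u)
			break;
	}
	while (*p)		/* .L_string_tail: locate the zero byte */
		p++;
	return p - s;
}

int main(void)
{
	static const char msg[8] __attribute__((aligned(4))) = "csky";

	printf("%zu\n", strlen_wordwise(msg));	/* 4 */
	return 0;
}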
+CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_GENERIC_PHY=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRAMFS=y +CONFIG_ROMFS_FS=y +CONFIG_NFS_FS=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild new file mode 100644 index 0000000000..1117c28cb7 --- /dev/null +++ b/arch/csky/include/asm/Kbuild @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += asm-offsets.h +generic-y += extable.h +generic-y += gpio.h +generic-y += kvm_para.h +generic-y += mcs_spinlock.h +generic-y += qrwlock.h +generic-y += qrwlock_types.h +generic-y += qspinlock.h +generic-y += parport.h +generic-y += user.h +generic-y += vmlinux.lds.h diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h new file mode 100644 index 0000000000..6fc05d4453 --- /dev/null +++ b/arch/csky/include/asm/addrspace.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ADDRSPACE_H +#define __ASM_CSKY_ADDRSPACE_H + +#define KSEG0 0x80000000ul +#define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0) + +#endif /* __ASM_CSKY_ADDRSPACE_H */ diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h new file mode 100644 index 0000000000..6ff205a97a --- /dev/null +++ b/arch/csky/include/asm/asid.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ASM_ASID_H +#define __ASM_ASM_ASID_H + +#include +#include +#include +#include +#include + +struct asid_info +{ + atomic64_t generation; + unsigned long *map; + atomic64_t __percpu *active; + u64 __percpu *reserved; + u32 bits; + /* Lock protecting the structure */ + raw_spinlock_t lock; + /* Which CPU requires context flush on next call */ + cpumask_t flush_pending; + /* Number of ASID allocated by context (shift value) */ + unsigned int ctxt_shift; + /* Callback to locally flush the context. */ + void (*flush_cpu_ctxt_cb)(void); +}; + +#define NUM_ASIDS(info) (1UL << ((info)->bits)) +#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift) + +#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu) + +void asid_new_context(struct asid_info *info, atomic64_t *pasid, + unsigned int cpu, struct mm_struct *mm); + +/* + * Check the ASID is still valid for the context. If not generate a new ASID. + * + * @pasid: Pointer to the current ASID batch + * @cpu: current CPU ID. Must have been acquired through get_cpu() + */ +static inline void asid_check_context(struct asid_info *info, + atomic64_t *pasid, unsigned int cpu, + struct mm_struct *mm) +{ + u64 asid, old_active_asid; + + asid = atomic64_read(pasid); + + /* + * The memory ordering here is subtle. + * If our active_asid is non-zero and the ASID matches the current + * generation, then we update the active_asid entry with a relaxed + * cmpxchg. 
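The fast path described in the comment above boils down to a generation comparison: an ASID remains usable only while its upper "generation" bits match the allocator's current generation. Stripped of the atomics and the cmpxchg publication step, the test is just this (the 12-bit ASID width is illustrative, not the hardware value):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ASID_BITS 12	/* illustrative width, info->bits in the kernel */

/* Mirror of !((asid ^ generation) >> info->bits). */
static bool asid_is_current(uint64_t asid, uint64_t generation)
{
	return ((asid ^ generation) >> ASID_BITS) == 0;
}

int main(void)
{
	uint64_t gen = 3ull << ASID_BITS;

	printf("%d\n", asid_is_current(gen | 5, gen));			/* 1 */
	printf("%d\n", asid_is_current((2ull << ASID_BITS) | 5, gen));	/* 0 */
	return 0;
}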
Racing with a concurrent rollover means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on the + * lock. Taking the lock synchronises with the rollover and so + * we are forced to see the updated generation. + * + * - We get a valid ASID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + old_active_asid = atomic64_read(&active_asid(info, cpu)); + if (old_active_asid && + !((asid ^ atomic64_read(&info->generation)) >> info->bits) && + atomic64_cmpxchg_relaxed(&active_asid(info, cpu), + old_active_asid, asid)) + return; + + asid_new_context(info, pasid, cpu, mm); +} + +int asid_allocator_init(struct asid_info *info, + u32 bits, unsigned int asid_per_ctxt, + void (*flush_cpu_ctxt_cb)(void)); + +#endif diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h new file mode 100644 index 0000000000..4dab44f614 --- /dev/null +++ b/arch/csky/include/asm/atomic.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ATOMIC_H +#define __ASM_CSKY_ATOMIC_H + +#ifdef CONFIG_SMP +#include + +#include +#include + +#define __atomic_acquire_fence() __bar_brarw() + +#define __atomic_release_fence() __bar_brwaw() + +static __always_inline int arch_atomic_read(const atomic_t *v) +{ + return READ_ONCE(v->counter); +} +static __always_inline void arch_atomic_set(atomic_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} + +#define ATOMIC_OP(op) \ +static __always_inline \ +void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp; \ + __asm__ __volatile__ ( \ + "1: ldex.w %0, (%2) \n" \ + " " #op " %0, %1 \n" \ + " stex.w %0, (%2) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp) \ + : "r" (i), "r" (&v->counter) \ + : "memory"); \ +} + +ATOMIC_OP(add) +ATOMIC_OP(sub) +ATOMIC_OP(and) +ATOMIC_OP( or) +ATOMIC_OP(xor) + +#undef ATOMIC_OP + +#define ATOMIC_FETCH_OP(op) \ +static __always_inline \ +int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + register int ret, tmp; \ + __asm__ __volatile__ ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %0 \n" \ + " " #op " %0, %2 \n" \ + " stex.w %0, (%3) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + return ret; \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static __always_inline \ +int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + return arch_atomic_fetch_##op##_relaxed(i, v) c_op i; \ +} + +#define ATOMIC_OPS(op, c_op) \ + ATOMIC_FETCH_OP(op) \ + ATOMIC_OP_RETURN(op, c_op) + +ATOMIC_OPS(add, +) +ATOMIC_OPS(sub, -) + +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_OP_RETURN + +#define ATOMIC_OPS(op) \ + ATOMIC_FETCH_OP(op) + +ATOMIC_OPS(and) +ATOMIC_OPS( or) +ATOMIC_OPS(xor) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS + +#undef ATOMIC_FETCH_OP + +static __always_inline int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int prev, tmp; + + __asm__ __volatile__ ( + RELEASE_FENCE + "1: ldex.w %0, (%3) \n" + " cmpne %0, %4 \n" + " bf 2f 
\n" + " mov %1, %0 \n" + " add %1, %2 \n" + " stex.w %1, (%3) \n" + " bez %1, 1b \n" + FULL_FENCE + "2:\n" + : "=&r" (prev), "=&r" (tmp) + : "r" (a), "r" (&v->counter), "r" (u) + : "memory"); + + return prev; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless + +static __always_inline bool +arch_atomic_inc_unless_negative(atomic_t *v) +{ + int rc, tmp; + + __asm__ __volatile__ ( + RELEASE_FENCE + "1: ldex.w %0, (%2) \n" + " movi %1, 0 \n" + " blz %0, 2f \n" + " movi %1, 1 \n" + " addi %0, 1 \n" + " stex.w %0, (%2) \n" + " bez %0, 1b \n" + FULL_FENCE + "2:\n" + : "=&r" (tmp), "=&r" (rc) + : "r" (&v->counter) + : "memory"); + + return tmp ? true : false; + +} +#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative + +static __always_inline bool +arch_atomic_dec_unless_positive(atomic_t *v) +{ + int rc, tmp; + + __asm__ __volatile__ ( + RELEASE_FENCE + "1: ldex.w %0, (%2) \n" + " movi %1, 0 \n" + " bhz %0, 2f \n" + " movi %1, 1 \n" + " subi %0, 1 \n" + " stex.w %0, (%2) \n" + " bez %0, 1b \n" + FULL_FENCE + "2:\n" + : "=&r" (tmp), "=&r" (rc) + : "r" (&v->counter) + : "memory"); + + return tmp ? true : false; +} +#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive + +static __always_inline int +arch_atomic_dec_if_positive(atomic_t *v) +{ + int dec, tmp; + + __asm__ __volatile__ ( + RELEASE_FENCE + "1: ldex.w %0, (%2) \n" + " subi %1, %0, 1 \n" + " blz %1, 2f \n" + " stex.w %1, (%2) \n" + " bez %1, 1b \n" + FULL_FENCE + "2:\n" + : "=&r" (dec), "=&r" (tmp) + : "r" (&v->counter) + : "memory"); + + return dec - 1; +} +#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive + +#else +#include +#endif + +#endif /* __ASM_CSKY_ATOMIC_H */ diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h new file mode 100644 index 0000000000..15de58b10a --- /dev/null +++ b/arch/csky/include/asm/barrier.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_BARRIER_H +#define __ASM_CSKY_BARRIER_H + +#ifndef __ASSEMBLY__ + +#define nop() asm volatile ("nop\n":::"memory") + +#ifdef CONFIG_SMP + +/* + * bar.brwarws: ordering barrier for all load/store instructions + * before/after + * + * |31|30 26|25 21|20 16|15 10|9 5|4 0| + * 1 10000 00000 00000 100001 00001 0 bw br aw ar + * + * b: before + * a: after + * r: read + * w: write + * + * Here are all combinations: + * + * bar.brw + * bar.br + * bar.bw + * bar.arw + * bar.ar + * bar.aw + * bar.brwarw + * bar.brarw + * bar.bwarw + * bar.brwar + * bar.brwaw + * bar.brar + * bar.bwaw + */ +#define FULL_FENCE ".long 0x842fc000\n" +#define ACQUIRE_FENCE ".long 0x8427c000\n" +#define RELEASE_FENCE ".long 0x842ec000\n" + +#define __bar_brw() asm volatile (".long 0x842cc000\n":::"memory") +#define __bar_br() asm volatile (".long 0x8424c000\n":::"memory") +#define __bar_bw() asm volatile (".long 0x8428c000\n":::"memory") +#define __bar_arw() asm volatile (".long 0x8423c000\n":::"memory") +#define __bar_ar() asm volatile (".long 0x8421c000\n":::"memory") +#define __bar_aw() asm volatile (".long 0x8422c000\n":::"memory") +#define __bar_brwarw() asm volatile (FULL_FENCE:::"memory") +#define __bar_brarw() asm volatile (ACQUIRE_FENCE:::"memory") +#define __bar_bwarw() asm volatile (".long 0x842bc000\n":::"memory") +#define __bar_brwar() asm volatile (".long 0x842dc000\n":::"memory") +#define __bar_brwaw() asm volatile (RELEASE_FENCE:::"memory") +#define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory") +#define __bar_brar() asm volatile (".long 
0x8425c000\n":::"memory") +#define __bar_bwaw() asm volatile (".long 0x842ac000\n":::"memory") + +#define __smp_mb() __bar_brwarw() +#define __smp_rmb() __bar_brar() +#define __smp_wmb() __bar_bwaw() + +#define __smp_acquire_fence() __bar_brarw() +#define __smp_release_fence() __bar_brwaw() + +#endif /* CONFIG_SMP */ + +/* + * sync: completion barrier, all sync.xx instructions + * guarantee the last response received by bus transaction + * made by ld/st instructions before sync.s + * sync.s: inherit from sync, but also shareable to other cores + * sync.i: inherit from sync, but also flush cpu pipeline + * sync.is: the same with sync.i + sync.s + */ +#define mb() asm volatile ("sync\n":::"memory") + +#ifdef CONFIG_CPU_HAS_CACHEV2 +/* + * Using three sync.is to prevent speculative PTW + */ +#define sync_is() asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory") +#endif + +#include + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CSKY_BARRIER_H */ diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h new file mode 100644 index 0000000000..72e1b2aa29 --- /dev/null +++ b/arch/csky/include/asm/bitops.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_BITOPS_H +#define __ASM_CSKY_BITOPS_H + +#include +#include + +/* + * asm-generic/bitops/ffs.h + */ +static inline int ffs(int x) +{ + if (!x) + return 0; + + asm volatile ( + "brev %0\n" + "ff1 %0\n" + "addi %0, 1\n" + : "=&r"(x) + : "0"(x)); + return x; +} + +/* + * asm-generic/bitops/__ffs.h + */ +static __always_inline unsigned long __ffs(unsigned long x) +{ + asm volatile ( + "brev %0\n" + "ff1 %0\n" + : "=&r"(x) + : "0"(x)); + return x; +} + +/* + * asm-generic/bitops/fls.h + */ +static __always_inline int fls(unsigned int x) +{ + asm volatile( + "ff1 %0\n" + : "=&r"(x) + : "0"(x)); + + return (32 - x); +} + +/* + * asm-generic/bitops/__fls.h + */ +static __always_inline unsigned long __fls(unsigned long x) +{ + return fls(x) - 1; +} + +#include +#include + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include +#include +#include + +/* + * bug fix, why only could use atomic!!!! 
+ */ +#include + +#include +#include +#endif /* __ASM_CSKY_BITOPS_H */ diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h new file mode 100644 index 0000000000..03f1a5f918 --- /dev/null +++ b/arch/csky/include/asm/bug.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_BUG_H +#define __ASM_CSKY_BUG_H + +#include +#include +#include + +#define BUG() \ +do { \ + asm volatile ("bkpt\n"); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG + +#include + +struct pt_regs; + +void die(struct pt_regs *regs, const char *str); +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr); + +void show_regs(struct pt_regs *regs); +void show_code(struct pt_regs *regs); + +#endif /* __ASM_CSKY_BUG_H */ diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h new file mode 100644 index 0000000000..4b5c09bf1d --- /dev/null +++ b/arch/csky/include/asm/cache.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CACHE_H +#define __ASM_CSKY_CACHE_H + +/* bytes per L1 cache line */ +#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#ifndef __ASSEMBLY__ + +void dcache_wb_line(unsigned long start); + +void icache_inv_range(unsigned long start, unsigned long end); +void icache_inv_all(void); +void local_icache_inv_all(void *priv); + +void dcache_wb_range(unsigned long start, unsigned long end); +void dcache_wbinv_all(void); + +void cache_wbinv_range(unsigned long start, unsigned long end); +void cache_wbinv_all(void); + +void dma_wbinv_range(unsigned long start, unsigned long end); +void dma_inv_range(unsigned long start, unsigned long end); +void dma_wb_range(unsigned long start, unsigned long end); + +#endif +#endif /* __ASM_CSKY_CACHE_H */ diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h new file mode 100644 index 0000000000..d0f9eafe89 --- /dev/null +++ b/arch/csky/include/asm/cacheflush.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CACHEFLUSH_H +#define __ASM_CSKY_CACHEFLUSH_H + +#include +#include + +#endif /* __ASM_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h new file mode 100644 index 0000000000..aa12ef4b90 --- /dev/null +++ b/arch/csky/include/asm/checksum.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CHECKSUM_H +#define __ASM_CSKY_CHECKSUM_H + +#include +#include + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 tmp; + + asm volatile( + "mov %1, %0\n" + "rori %0, 16\n" + "addu %0, %1\n" + "lsri %0, 16\n" + : "=r"(csum), "=r"(tmp) + : "0"(csum)); + + return (__force __sum16) ~csum; +} +#define csum_fold csum_fold + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, unsigned short proto, __wsum sum) +{ + asm volatile( + "clrc\n" + "addc %0, %1\n" + "addc %0, %2\n" + "addc %0, %3\n" + "inct %0\n" + : "=r"(sum) + : "r"((__force u32)saddr), "r"((__force u32)daddr), +#ifdef __BIG_ENDIAN + "r"(proto + len), +#else + "r"((proto + len) << 8), +#endif + "0" ((__force unsigned long)sum) + : "cc"); + return sum; +} +#define csum_tcpudp_nofold csum_tcpudp_nofold + +#include + +#endif /* __ASM_CSKY_CHECKSUM_H */ diff --git a/arch/csky/include/asm/clocksource.h b/arch/csky/include/asm/clocksource.h new file mode 100644 index 0000000000..54da0e49ef --- /dev/null +++ b/arch/csky/include/asm/clocksource.h @@ -0,0 
+1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H +#define __ASM_VDSO_CSKY_CLOCKSOURCE_H + +#include + +#endif diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h new file mode 100644 index 0000000000..916043b845 --- /dev/null +++ b/arch/csky/include/asm/cmpxchg.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CMPXCHG_H +#define __ASM_CSKY_CMPXCHG_H + +#ifdef CONFIG_SMP +#include +#include + +#define __xchg_relaxed(new, ptr, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + unsigned long tmp; \ + switch (size) { \ + case 2: { \ + u32 ret; \ + u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \ + u32 mask = 0xffff << shif; \ + __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \ + __asm__ __volatile__ ( \ + "1: ldex.w %0, (%4)\n" \ + " and %1, %0, %2\n" \ + " or %1, %1, %3\n" \ + " stex.w %1, (%4)\n" \ + " bez %1, 1b\n" \ + : "=&r" (ret), "=&r" (tmp) \ + : "r" (~mask), \ + "r" ((u32)__new << shif), \ + "r" (__ptr) \ + : "memory"); \ + __ret = (__typeof__(*(ptr))) \ + ((ret & mask) >> shif); \ + break; \ + } \ + case 4: \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + : "=&r" (__ret), "=&r" (tmp) \ + : "r" (__new), "r"(__ptr) \ + :); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_xchg_relaxed(ptr, x) \ + (__xchg_relaxed((x), (ptr), sizeof(*(ptr)))) + +#define __cmpxchg_relaxed(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(new) __tmp; \ + __typeof__(old) __old = (old); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " cmpne %0, %4 \n" \ + " bt 2f \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + "2: \n" \ + : "=&r" (__ret), "=&r" (__tmp) \ + : "r" (__new), "r"(__ptr), "r"(__old) \ + :); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg_relaxed(ptr, o, n) \ + (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) + +#define __cmpxchg_acquire(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(new) __tmp; \ + __typeof__(old) __old = (old); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " cmpne %0, %4 \n" \ + " bt 2f \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + ACQUIRE_FENCE \ + "2: \n" \ + : "=&r" (__ret), "=&r" (__tmp) \ + : "r" (__new), "r"(__ptr), "r"(__old) \ + :); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg_acquire(ptr, o, n) \ + (__cmpxchg_acquire((ptr), (o), (n), sizeof(*(ptr)))) + +#define __cmpxchg(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(new) __tmp; \ + __typeof__(old) __old = (old); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + asm volatile ( \ + RELEASE_FENCE \ + "1: ldex.w %0, (%3) \n" \ + " cmpne %0, %4 \n" \ + " bt 2f \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + FULL_FENCE \ + "2: \n" \ + : "=&r" (__ret), "=&r" (__tmp) \ + : "r" (__new), "r"(__ptr), "r"(__old) \ + :); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg(ptr, o, n) \ + (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))) + +#define 
arch_cmpxchg_local(ptr, o, n) \ + (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) +#else +#include +#endif + +#endif /* __ASM_CSKY_CMPXCHG_H */ diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h new file mode 100644 index 0000000000..48b83e283e --- /dev/null +++ b/arch/csky/include/asm/elf.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ELF_H +#define __ASM_CSKY_ELF_H + +#include +#include + +#define ELF_ARCH EM_CSKY +#define EM_CSKY_OLD 39 + +/* CSKY Relocations */ +#define R_CSKY_NONE 0 +#define R_CSKY_32 1 +#define R_CSKY_PCIMM8BY4 2 +#define R_CSKY_PCIMM11BY2 3 +#define R_CSKY_PCIMM4BY2 4 +#define R_CSKY_PC32 5 +#define R_CSKY_PCRELJSR_IMM11BY2 6 +#define R_CSKY_GNU_VTINHERIT 7 +#define R_CSKY_GNU_VTENTRY 8 +#define R_CSKY_RELATIVE 9 +#define R_CSKY_COPY 10 +#define R_CSKY_GLOB_DAT 11 +#define R_CSKY_JUMP_SLOT 12 +#define R_CSKY_ADDR_HI16 24 +#define R_CSKY_ADDR_LO16 25 +#define R_CSKY_PCRELJSR_IMM26BY2 40 + +typedef unsigned long elf_greg_t; + +typedef struct user_fp elf_fpregset_t; + +/* + * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() uses a fixed size of + * elf_prstatus: 148 for abiv1 and 220 for abiv2. That size is enough for + * coredump, so we don't need the full sizeof(struct pt_regs). + */ +#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2) + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \ + ((x)->e_machine == EM_CSKY_OLD)) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_EXEC_PAGESIZE 4096 +#define ELF_CLASS ELFCLASS32 +#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; } + +#ifdef __cskyBE__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif + +/* + * This is the location where an ET_DYN program is loaded when exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +#define ELF_ET_DYN_BASE 0x0UL +#include + +/* Similar, but for a thread other than current. */ +struct task_struct; +extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs); +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + +#define ELF_HWCAP (0) + +/* + * This yields a string that ld.so will use to load implementation-specific + * libraries for optimization. This is more specific in intent than poking + * at uname or /proc/cpuinfo.
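+ * + * The string is exported to user space through the AT_PLATFORM aux vector + * entry. Since ELF_PLATFORM is defined as NULL just below, a hypothetical + * user-space probe along these lines sees no platform string on C-SKY: + * + *	#include <sys/auxv.h> + *	const char *plat = (const char *)getauxval(AT_PLATFORM); + * + * Here plat ends up NULL, because no AT_PLATFORM entry is emitted.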
+ */ +#define ELF_PLATFORM (NULL) +#define SET_PERSONALITY(ex) set_personality(PER_LINUX) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#endif /* __ASM_CSKY_ELF_H */ diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h new file mode 100644 index 0000000000..49a77cbbe2 --- /dev/null +++ b/arch/csky/include/asm/fixmap.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FIXMAP_H +#define __ASM_CSKY_FIXMAP_H + +#include +#include +#ifdef CONFIG_HIGHMEM +#include +#include +#endif + +enum fixed_addresses { +#ifdef CONFIG_HAVE_TCM + FIX_TCM = TCM_NR_PAGES, +#endif +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1, +#endif + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#include + +extern void fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base); +extern void __init fixaddr_init(void); + +#endif /* __ASM_CSKY_FIXMAP_H */ diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h new file mode 100644 index 0000000000..9b86341731 --- /dev/null +++ b/arch/csky/include/asm/ftrace.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FTRACE_H +#define __ASM_CSKY_FTRACE_H + +#define MCOUNT_INSN_SIZE 14 + +#define HAVE_FUNCTION_GRAPH_FP_TEST + +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#define MCOUNT_ADDR ((unsigned long)_mcount) + +#ifndef __ASSEMBLY__ + +extern void _mcount(unsigned long); + +extern void ftrace_graph_call(void); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { +}; +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_CSKY_FTRACE_H */ diff --git a/arch/csky/include/asm/futex.h b/arch/csky/include/asm/futex.h new file mode 100644 index 0000000000..6cfd312723 --- /dev/null +++ b/arch/csky/include/asm/futex.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FUTEX_H +#define __ASM_CSKY_FUTEX_H + +#ifndef CONFIG_SMP +#include +#else +#include +#include +#include +#include + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + u32 tmp; \ + \ + __atomic_pre_full_fence(); \ + \ + __asm__ __volatile__ ( \ + "1: ldex.w %[ov], %[u] \n" \ + " "insn" \n" \ + "2: stex.w %[t], %[u] \n" \ + " bez %[t], 1b \n" \ + " br 4f \n" \ + "3: mov %[r], %[e] \n" \ + "4: \n" \ + " .section __ex_table,\"a\" \n" \ + " .balign 4 \n" \ + " .long 1b, 3b \n" \ + " .long 2b, 3b \n" \ + " .previous \n" \ + : [r] "+r" (ret), [ov] "=&r" (oldval), \ + [u] "+m" (*uaddr), [t] "=&r" (tmp) \ + : [op] "Jr" (oparg), [e] "jr" (-EFAULT) \ + : "memory"); \ + \ + __atomic_post_full_fence(); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %[t], %[ov]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and %[t], %[ov], %[op]", + ret, oldval, uaddr, ~oparg); + break; + case 
FUTEX_OP_XOR: + __futex_atomic_op("xor %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} + + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __atomic_pre_full_fence(); + + __asm__ __volatile__ ( + "1: ldex.w %[v], %[u] \n" + " cmpne %[v], %[ov] \n" + " bt 4f \n" + " mov %[t], %[nv] \n" + "2: stex.w %[t], %[u] \n" + " bez %[t], 1b \n" + " br 4f \n" + "3: mov %[r], %[e] \n" + "4: \n" + " .section __ex_table,\"a\" \n" + " .balign 4 \n" + " .long 1b, 3b \n" + " .long 2b, 3b \n" + " .previous \n" + : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), + [t] "=&r" (tmp) + : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT) + : "memory"); + + __atomic_post_full_fence(); + + *uval = val; + return ret; +} + +#endif /* CONFIG_SMP */ +#endif /* __ASM_CSKY_FUTEX_H */ diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h new file mode 100644 index 0000000000..1ed810effb --- /dev/null +++ b/arch/csky/include/asm/highmem.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_HIGHMEM_H +#define __ASM_CSKY_HIGHMEM_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +/* undef for production */ +#define HIGHMEM_DEBUG 1 + +/* declarations for highmem.c */ +extern unsigned long highstart_pfn, highend_pfn; + +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +#define LAST_PKMAP 1024 +#define LAST_PKMAP_MASK (LAST_PKMAP-1) +#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +#define ARCH_HAS_KMAP_FLUSH_TLB +extern void kmap_flush_tlb(unsigned long addr); + +#define flush_cache_kmaps() do {} while (0) + +#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr) +#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr) + +extern void kmap_init(void); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_CSKY_HIGHMEM_H */ diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h new file mode 100644 index 0000000000..4725bb977b --- /dev/null +++ b/arch/csky/include/asm/io.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IO_H +#define __ASM_CSKY_IO_H + +#include +#include + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + * + * For CACHEV1 (807, 810), store instruction could fast retire, so we need + * another mb() to prevent st fast retire. + * + * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't + * fast retire. 
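+ * + * Illustrative sketch of what the definitions below guarantee (DATA and + * DOORBELL are a made-up device register layout, purely for example): + * + *	writel(val, base + DATA); + *	writel(1, base + DOORBELL); + * + * The wmb() in front of each store keeps the DATA write visible to the + * device before the DOORBELL write, and the rmb() after each read keeps + * readl() ordered before any dependent Normal memory access that follows.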
+ */ +#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) + +#ifdef CONFIG_CPU_HAS_CACHEV2 +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) +#else +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) +#endif + +/* + * String version of I/O memory access operations. + */ +extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t); +extern void __memcpy_toio(volatile void __iomem *, const void *, size_t); +extern void __memset_io(volatile void __iomem *, int, size_t); + +#define memset_io(c,v,l) __memset_io((c),(v),(l)) +#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) +#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) + +/* + * I/O memory mapping functions. + */ +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), \ + (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED) + +#include + +#endif /* __ASM_CSKY_IO_H */ diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h new file mode 100644 index 0000000000..33aaf39d6f --- /dev/null +++ b/arch/csky/include/asm/irq_work.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IRQ_WORK_H +#define __ASM_CSKY_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return true; +} +extern void arch_irq_work_raise(void); +#endif /* __ASM_CSKY_IRQ_WORK_H */ diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h new file mode 100644 index 0000000000..9e3a569a55 --- /dev/null +++ b/arch/csky/include/asm/irqflags.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IRQFLAGS_H +#define __ASM_CSKY_IRQFLAGS_H +#include + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = mfcr("psr"); + asm volatile("psrclr ie\n":::"memory"); + return flags; +} +#define arch_local_irq_save arch_local_irq_save + +static inline void arch_local_irq_enable(void) +{ + asm volatile("psrset ee, ie\n":::"memory"); +} +#define arch_local_irq_enable arch_local_irq_enable + +static inline void arch_local_irq_disable(void) +{ + asm volatile("psrclr ie\n":::"memory"); +} +#define arch_local_irq_disable arch_local_irq_disable + +static inline unsigned long arch_local_save_flags(void) +{ + return mfcr("psr"); +} +#define arch_local_save_flags arch_local_save_flags + +static inline void arch_local_irq_restore(unsigned long flags) +{ + mtcr("psr", flags); +} +#define arch_local_irq_restore arch_local_irq_restore + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1<<6)); +} +#define arch_irqs_disabled_flags arch_irqs_disabled_flags + +#include + +#endif /* __ASM_CSKY_IRQFLAGS_H */ diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h new file mode 100644 index 0000000000..98a3f4b168 --- /dev/null +++ b/arch/csky/include/asm/jump_label.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_JUMP_LABEL_H +#define __ASM_CSKY_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include + +#define JUMP_LABEL_NOP_SIZE 4 + +static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) +{ + 
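/* +	 * Emit a 4-byte nop32 at label 1 and record a __jump_table entry of +	 * three PC-relative words: the nop's address, the out-of-line target +	 * label, and &key (whose low bit encodes the branch direction, hence +	 * the &((char *)key)[branch] operand below). The jump label patching +	 * code later rewrites the nop32 into a bsr32 when the key is enabled. +	 */ +	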
asm_volatile_goto( + "1: nop32 \n" + " .pushsection __jump_table, \"aw\" \n" + " .align 2 \n" + " .long 1b - ., %l[label] - . \n" + " .long %0 - . \n" + " .popsection \n" + : : "i"(&((char *)key)[branch]) : : label); + + return false; +label: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, + bool branch) +{ + asm_volatile_goto( + "1: bsr32 %l[label] \n" + " .pushsection __jump_table, \"aw\" \n" + " .align 2 \n" + " .long 1b - ., %l[label] - . \n" + " .long %0 - . \n" + " .popsection \n" + : : "i"(&((char *)key)[branch]) : : label); + + return false; +label: + return true; +} + +enum jump_label_type; +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type); +#define arch_jump_label_transform_static arch_jump_label_transform_static + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CSKY_JUMP_LABEL_H */ diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h new file mode 100644 index 0000000000..55267cbf52 --- /dev/null +++ b/arch/csky/include/asm/kprobes.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_KPROBES_H +#define __ASM_CSKY_KPROBES_H + +#include + +#ifdef CONFIG_KPROBES +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 1 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* Single step context for kprobe */ +struct kprobe_step_ctx { + unsigned long ss_pending; + unsigned long match_addr; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_sr; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; +}; + +void arch_remove_kprobe(struct kprobe *p); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); +int kprobe_breakpoint_handler(struct pt_regs *regs); +int kprobe_single_step_handler(struct pt_regs *regs); +void __kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ +#endif /* __ASM_CSKY_KPROBES_H */ diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h new file mode 100644 index 0000000000..d12179801a --- /dev/null +++ b/arch/csky/include/asm/memory.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MEMORY_H +#define __ASM_CSKY_MEMORY_H + +#include +#include +#include +#include + +#define FIXADDR_TOP _AC(0xffffc000, UL) +#define PKMAP_BASE _AC(0xff800000, UL) +#define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8)) +#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) + +#ifdef CONFIG_HAVE_TCM +#ifdef CONFIG_HAVE_DTCM +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) +#else +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) +#endif +#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) +#endif + +#endif diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h new file mode 100644 index 0000000000..d78321901d --- /dev/null +++ b/arch/csky/include/asm/mmu.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MMU_H +#define __ASM_CSKY_MMU_H + +typedef struct { + atomic64_t asid; + void *vdso; + cpumask_t icache_stale_mask; +} mm_context_t; + +#endif /* __ASM_CSKY_MMU_H */ diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h new file mode 100644 index 
0000000000..95d99b3079 --- /dev/null +++ b/arch/csky/include/asm/mmu_context.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MMU_CONTEXT_H +#define __ASM_CSKY_MMU_CONTEXT_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1) +#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK) + +#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; }) + +void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); + +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + + if (prev != next) + check_and_switch_context(next, cpu); + + setup_pgd(next->pgd, next->context.asid.counter); + + flush_icache_deferred(next); +} + +#include + +#endif /* __ASM_CSKY_MMU_CONTEXT_H */ diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h new file mode 100644 index 0000000000..4a0502e324 --- /dev/null +++ b/arch/csky/include/asm/page.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PAGE_H +#define __ASM_CSKY_PAGE_H + +#include +#include +#include + +/* + * PAGE_SHIFT determines the page size: 4KB + */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define THREAD_SIZE (PAGE_SIZE * 2) +#define THREAD_MASK (~(THREAD_SIZE - 1)) +#define THREAD_SHIFT (PAGE_SHIFT + 1) + + +/* + * For C-SKY, "User-space:Kernel-space" is "2GB:2GB" fixed by hardware, and + * there are two segment registers (MSA0 + MSA1) to map a 512MB + 512MB + * physical address region. We use them to map the kernel's 1GB direct-map + * address area; for more than 1GB of memory we use highmem.
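+ * + * A sketch of the resulting virtual layout, assuming the common + * CONFIG_PAGE_OFFSET of 0x80000000 (the remaining constants follow this + * file, asm/memory.h and asm/processor.h): + * + *	0 .. TASK_SIZE                : user space (just under 2GB) + *	0x80000000 .. 0xbfffffff      : lowmem direct map (LOWMEM_LIMIT = 1GB) + *	VMALLOC_START .. VMALLOC_END  : vmalloc area + *	0xff800000 (PKMAP_BASE) ..    : kmap area for highmem + *	.. 0xffffc000 (FIXADDR_TOP)   : fixmap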
+ */ +#define PAGE_OFFSET CONFIG_PAGE_OFFSET +#define SSEG_SIZE 0x20000000 +#define LOWMEM_LIMIT (SSEG_SIZE * 2) + +#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) + +#ifndef __ASSEMBLY__ + +#include + +#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \ + (void *)(kaddr) < high_memory) + +extern void *memset(void *dest, int c, size_t l); +extern void *memcpy(void *to, const void *from, size_t l); + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr))) + +struct page; + +#include + +struct vm_area_struct; + +typedef struct { unsigned long pte_low; } pte_t; +#define pte_val(x) ((x).pte_low) + +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +extern unsigned long va_pa_offset; + +#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET) + +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset)) + +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + return __pa(kaddr) >> PAGE_SHIFT; +} + +static inline void *pfn_to_virt(unsigned long pfn) +{ + return __va(pfn << PAGE_SHIFT); +} + +#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \ + PHYS_OFFSET_OFFSET) +#define virt_to_page(x) (mem_map + MAP_NR(x)) + +#define pfn_to_kaddr(x) __va(PFN_PHYS(x)) + +#include +#include + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_CSKY_PAGE_H */ diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h new file mode 100644 index 0000000000..42724c630d --- /dev/null +++ b/arch/csky/include/asm/pci.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_PCI_H +#define __ASM_CSKY_PCI_H + +#include +#include +#include + +#include + +/* Generic PCI */ +#include + +#endif /* __ASM_CSKY_PCI_H */ diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h new file mode 100644 index 0000000000..249905d8a4 --- /dev/null +++ b/arch/csky/include/asm/perf_event.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PERF_EVENT_H +#define __ASM_CSKY_PERF_EVENT_H + +#include + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->pc = (__ip); \ + regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \ + asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \ +} + +#endif /* __ASM_CSKY_PERF_EVENT_H */ diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h new file mode 100644 index 0000000000..9c84c9012e --- /dev/null +++ b/arch/csky/include/asm/pgalloc.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PGALLOC_H +#define __ASM_CSKY_PGALLOC_H + +#include +#include +#include + +#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL +#include + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd(__pa(pte))); +} + +static inline void pmd_populate(struct mm_struct *mm, 
pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd(__pa(page_address(pte)))); +} + +extern void pgd_init(unsigned long *p); + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + pte_t *pte; + unsigned long i; + + pte = (pte_t *) __get_free_page(GFP_KERNEL); + if (!pte) + return NULL; + + for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) + (pte + i)->pte_low = _PAGE_GLOBAL; + + return pte; +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret; + pgd_t *init; + + ret = (pgd_t *) __get_free_page(GFP_KERNEL); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long *)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + /* prevent out-of-order execution */ + smp_mb(); +#ifdef CONFIG_CPU_NEED_TLBSYNC + dcache_wb_range((unsigned int)ret, + (unsigned int)(ret + PTRS_PER_PGD)); +#endif + } + + return ret; +} + +#define __pte_free_tlb(tlb, pte, address) \ +do { \ + pagetable_pte_dtor(page_ptdesc(pte)); \ + tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \ +} while (0) + +extern void pagetable_init(void); +extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn); +extern void pre_trap_init(void); + +#endif /* __ASM_CSKY_PGALLOC_H */ diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h new file mode 100644 index 0000000000..a397e1718a --- /dev/null +++ b/arch/csky/include/asm/pgtable.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PGTABLE_H +#define __ASM_CSKY_PGTABLE_H + +#include +#include +#include +#include +#include + +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE) + +/* + * C-SKY has a two-level paging structure: + */ + +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) +#define PTRS_PER_PMD 1 +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +#define PFN_PTE_SHIFT PAGE_SHIFT +#define pmd_pfn(pmd) (pmd_phys(pmd) >> PAGE_SHIFT) +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#define pte_clear(mm, addr, ptep) set_pte((ptep), \ + (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ + | pgprot_val(prot)) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \ + pgprot_val(pgprot)) + +/* + * C-SKY only has VALID and DIRTY bits in hardware. So we need to use + * these two bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED, ACCESSED.
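+ * + * Concretely (see pte_mkyoung()/pte_mkdirty() further down): a fresh PTE + * carries only the software view; the hardware VALID and DIRTY bits are set + * lazily, so the first access or write takes a fault and lets the handler + * account the access: + * + *	pte_mkyoung(): set _PAGE_ACCESSED, plus _PAGE_VALID if readable + *	pte_mkdirty(): set _PAGE_MODIFIED, plus _PAGE_DIRTY if writable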
+ */ +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) + +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) +#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ | \ + _CACHE_CACHED) +#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \ + _CACHE_CACHED) +#define PAGE_SHARED PAGE_WRITE + +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \ + _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \ + _PAGE_GLOBAL | \ + _CACHE_CACHED) + +#define _PAGE_IOREMAP (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \ + _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \ + _PAGE_GLOBAL | \ + _CACHE_UNCACHED | _PAGE_SO) + +#define _PAGE_CHG_MASK (~(unsigned long) \ + (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _CACHE_MASK | _PAGE_GLOBAL)) + +#define MAX_SWAPFILES_CHECK() \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5) + +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +extern void load_pgd(unsigned long pg_dir); +extern pte_t invalid_pte_table[PTRS_PER_PTE]; + +static inline void set_pte(pte_t *p, pte_t pte) +{ + *p = pte; +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif + /* prevent out-of-order execution */ + smp_mb(); +} + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ + unsigned long ptr; + + ptr = pmd_val(pmd); + + return __va(ptr); +} + +#define pmd_phys(pmd) pmd_val(pmd) + +static inline void set_pmd(pmd_t *p, pmd_t pmd) +{ + *p = pmd; +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif + /* prevent speculative execution */ + smp_mb(); +} + + +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == __pa(invalid_pte_table); +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return (pmd_val(pmd) != __pa(invalid_pte_table)); +} + +static inline void pmd_clear(pmd_t *p) +{ + pmd_val(*p) = (__pa(invalid_pte_table)); +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.
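+ * + * Typical caller pattern (an illustrative sketch only): + * + *	pte_t pte = *ptep; + *	if (pte_present(pte) && pte_write(pte)) + *		set_pte(ptep, pte_wrprotect(pte));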
+ */ +static inline int pte_read(pte_t pte) +{ + return pte.pte_low & _PAGE_READ; +} + +static inline int pte_write(pte_t pte) +{ + return (pte).pte_low & _PAGE_WRITE; +} + +static inline int pte_dirty(pte_t pte) +{ + return (pte).pte_low & _PAGE_MODIFIED; +} + +static inline int pte_young(pte_t pte) +{ + return (pte).pte_low & _PAGE_ACCESSED; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID); + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_VALID; + return pte; +} + +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); + +/* + * Macro to mark a page protection value as "uncacheable". Note + * that "protection" is really a misnomer here as the protection value + * contains the memory attribute bits, dirty bits, and various other + * bits as well. + */ +#define pgprot_noncached pgprot_noncached + +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO; + + return __pgprot(prot); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; + + return __pgprot(prot); +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to.
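+ * + * e.g. mk_pte(page, PAGE_READ) below builds a read-only, cached PTE for + * @page, and pte_page() goes the other way, from a PTE back to its + * struct page.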
+ */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot))); +} + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern void paging_init(void); + +void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long address, pte_t *pte, unsigned int nr); +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +#endif /* __ASM_CSKY_PGTABLE_H */ diff --git a/arch/csky/include/asm/probes.h b/arch/csky/include/asm/probes.h new file mode 100644 index 0000000000..5e526334e6 --- /dev/null +++ b/arch/csky/include/asm/probes.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PROBES_H +#define __ASM_CSKY_PROBES_H + +typedef u32 probe_opcode_t; +typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + /* restore address after simulation */ + unsigned long restore; +}; + +#ifdef CONFIG_KPROBES +typedef u32 kprobe_opcode_t; +struct arch_specific_insn { + struct arch_probe_insn api; +}; +#endif + +#endif /* __ASM_CSKY_PROBES_H */ diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h new file mode 100644 index 0000000000..e487a46d1c --- /dev/null +++ b/arch/csky/include/asm/processor.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PROCESSOR_H +#define __ASM_CSKY_PROCESSOR_H + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_CPU_HAS_FPU +#include +#endif + +struct cpuinfo_csky { + unsigned long asid_cache; +} __aligned(SMP_CACHE_BYTES); + +extern struct cpuinfo_csky cpu_data[]; + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. TASK_SIZE + * for a 64 bit kernel expandable to 8192EB, of which the current CSKY + * implementations will "only" be able to use 1TB ... + */ +#define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8)) + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP +#endif + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) + +struct thread_struct { + unsigned long sp; /* kernel stack pointer */ + unsigned long trap_no; /* saved status register */ + + /* FPU regs */ + struct user_fp __aligned(16) user_fp; +}; + +#define INIT_THREAD { \ + .sp = sizeof(init_stack) + (unsigned long) &init_stack, \ +} + +/* + * Do necessary setup to start up a newly executed thread. + * + * pass the data segment into user programs if it exists, + * it can't hurt anything as far as I can tell + */ +#define start_thread(_regs, _pc, _usp) \ +do { \ + (_regs)->pc = (_pc); \ + (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \ + (_regs)->regs[2] = 0; \ + (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? 
*/ \ + (_regs)->sr &= ~PS_S; \ + (_regs)->usp = (_usp); \ +} while (0) + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +unsigned long __get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) + +#define task_pt_regs(p) \ + ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) + +#define cpu_relax() barrier() + +register unsigned long current_stack_pointer __asm__("sp"); + +#endif /* __ASM_CSKY_PROCESSOR_H */ diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h new file mode 100644 index 0000000000..0634b7895d --- /dev/null +++ b/arch/csky/include/asm/ptrace.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PTRACE_H +#define __ASM_CSKY_PTRACE_H + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +#define PS_S 0x80000000 /* Supervisor Mode */ + +#define USR_BKPT 0x1464 + +#define arch_has_single_step() (1) +#define current_pt_regs() \ +({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; }) + +#define user_stack_pointer(regs) ((regs)->usp) + +#define user_mode(regs) (!((regs)->sr & PS_S)) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define trap_no(regs) ((regs->sr >> 16) & 0xff) + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->pc = val; +} + +#if defined(__CSKYABIV2__) +#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr) +#else +#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9]) +#endif + +static inline bool in_syscall(struct pt_regs const *regs) +{ + return ((regs->sr >> 16) & 0xff) == VEC_TRAP0; +} + +static inline void forget_syscall(struct pt_regs *regs) +{ + regs->sr &= ~(0xff << 16); +} + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void regs_set_return_value(struct pt_regs *regs, + unsigned long val) +{ + regs->a0 = val; +} + +/* Valid only for Kernel mode traps. */ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->usp; +} + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return regs->regs[4]; +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->regs[4] = val; +} + +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/* + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register() returns the value of the register whose offset from + * @regs is @offset. The @offset is the offset of the register in + * struct pt_regs. If @offset is bigger than MAX_REG_OFFSET, this returns 0.
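+ * + * Example (sketch): reading a0 from a handler's @regs: + * + *	unsigned long a0 = regs_get_register(regs, + *					     offsetof(struct pt_regs, a0));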
+ */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +asmlinkage int syscall_trace_enter(struct pt_regs *regs); +asmlinkage void syscall_trace_exit(struct pt_regs *regs); +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CSKY_PTRACE_H */ diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h new file mode 100644 index 0000000000..cccf7d525f --- /dev/null +++ b/arch/csky/include/asm/reg_ops.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_REGS_OPS_H +#define __ASM_REGS_OPS_H + +#define mfcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile( \ + "mfcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define mtcr(reg, val) \ +({ \ + asm volatile( \ + "mtcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#endif /* __ASM_REGS_OPS_H */ diff --git a/arch/csky/include/asm/seccomp.h b/arch/csky/include/asm/seccomp.h new file mode 100644 index 0000000000..d33e758126 --- /dev/null +++ b/arch/csky/include/asm/seccomp.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include + +#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_CSKY +#define SECCOMP_ARCH_NATIVE_NR NR_syscalls +#define SECCOMP_ARCH_NATIVE_NAME "csky" + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h new file mode 100644 index 0000000000..83e82b7c0f --- /dev/null +++ b/arch/csky/include/asm/sections.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_SECTIONS_H +#define __ASM_SECTIONS_H + +#include + +extern char _start[]; + +asmlinkage void csky_start(unsigned int unused, void *dtb_start); + +#endif /* __ASM_SECTIONS_H */ diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h new file mode 100644 index 0000000000..2fe6cea0da --- /dev/null +++ b/arch/csky/include/asm/shmparam.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SHMPARAM_H +#define __ASM_CSKY_SHMPARAM_H + +#define SHMLBA (4 * PAGE_SIZE) + +#define __ARCH_FORCE_SHMLBA + +#endif /* __ASM_CSKY_SHMPARAM_H */ diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h new file mode 100644 index 0000000000..d3db334f31 --- /dev/null +++ b/arch/csky/include/asm/smp.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SMP_H +#define __ASM_CSKY_SMP_H + +#include +#include +#include + +#ifdef CONFIG_SMP + +void __init setup_smp(void); + +void __init setup_smp_ipi(void); + +void arch_send_call_function_ipi_mask(struct cpumask *mask); + +void arch_send_call_function_single_ipi(int cpu); + +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq); + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +int __cpu_disable(void); + +static inline void __cpu_die(unsigned int cpu) { } + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_CSKY_SMP_H */ diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h new file mode 100644 index 0000000000..83a2005341 --- /dev/null +++ b/arch/csky/include/asm/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_H +#define __ASM_CSKY_SPINLOCK_H + +#include +#include + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* __ASM_CSKY_SPINLOCK_H */ diff --git 
a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h new file mode 100644 index 0000000000..75bdf3af80 --- /dev/null +++ b/arch/csky/include/asm/spinlock_types.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_TYPES_H +#define __ASM_CSKY_SPINLOCK_TYPES_H + +#include +#include + +#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */ diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h new file mode 100644 index 0000000000..d237474471 --- /dev/null +++ b/arch/csky/include/asm/stackprotector.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary = get_random_canary(); + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h new file mode 100644 index 0000000000..a0d81e9d6b --- /dev/null +++ b/arch/csky/include/asm/string.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CSKY_STRING_MM_H_ +#define _CSKY_STRING_MM_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#endif + +#endif /* _CSKY_STRING_MM_H_ */ diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h new file mode 100644 index 0000000000..731e466415 --- /dev/null +++ b/arch/csky/include/asm/switch_to.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SWITCH_TO_H +#define __ASM_CSKY_SWITCH_TO_H + +#include +#ifdef CONFIG_CPU_HAS_FPU +#include +static inline void __switch_to_fpu(struct task_struct *prev, + struct task_struct *next) +{ + save_to_user_fp(&prev->thread.user_fp); + restore_from_user_fp(&next->thread.user_fp); +} +#else +static inline void __switch_to_fpu(struct task_struct *prev, + struct task_struct *next) +{} +#endif + +/* + * Context switching is now performed out-of-line in switch_to.S + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ + do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __switch_to_fpu(__prev, __next); \ + ((last) = __switch_to((prev), (next))); \ + } while (0) + +#endif /* __ASM_CSKY_SWITCH_TO_H */ diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h new file mode 100644 index 0000000000..0de5734950 --- /dev/null +++ b/arch/csky/include/asm/syscall.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_SYSCALL_H +#define __ASM_SYSCALL_H + +#include +#include +#include +#include + +extern void *sys_call_table[]; + +static inline int +syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs_syscallid(regs); +} + +static inline void +syscall_set_nr(struct task_struct *task, struct pt_regs *regs, + int sysno) +{ + regs_syscallid(regs) = sysno; +} + +static inline void +syscall_rollback(struct task_struct *task, struct pt_regs *regs) +{ + regs->a0 = regs->orig_a0; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + unsigned long error = regs->a0; + + return IS_ERR_VALUE(error) ? 
error : 0; +} + +static inline long +syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void +syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val) +{ + regs->a0 = (long) error ?: val; +} + +static inline void +syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_a0; + args++; + memcpy(args, ®s->a1, 5 * sizeof(args[0])); +} + +static inline int +syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_CSKY; +} + +#endif /* __ASM_SYSCALL_H */ diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h new file mode 100644 index 0000000000..ea9ce6138b --- /dev/null +++ b/arch/csky/include/asm/syscalls.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SYSCALLS_H +#define __ASM_CSKY_SYSCALLS_H + +#include + +long sys_cacheflush(void __user *, unsigned long, int); + +long sys_set_thread_area(unsigned long addr); + +long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len); + +#endif /* __ASM_CSKY_SYSCALLS_H */ diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h new file mode 100644 index 0000000000..bd1e662ecd --- /dev/null +++ b/arch/csky/include/asm/tcm.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TCM_H +#define __ASM_CSKY_TCM_H + +#ifndef CONFIG_HAVE_TCM +#error "You should not be including tcm.h unless you have a TCM!" +#endif + +#include + +/* Tag variables with this */ +#define __tcmdata __section(".tcm.data") +/* Tag constants with this */ +#define __tcmconst __section(".tcm.rodata") +/* Tag functions inside TCM called from outside TCM with this */ +#define __tcmfunc __section(".tcm.text") noinline +/* Tag function inside TCM called from inside TCM with this */ +#define __tcmlocalfunc __section(".tcm.text") + +void *tcm_alloc(size_t len); +void tcm_free(void *addr, size_t len); + +#endif diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h new file mode 100644 index 0000000000..b5ed788f0c --- /dev/null +++ b/arch/csky/include/asm/thread_info.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_CSKY_THREAD_INFO_H +#define _ASM_CSKY_THREAD_INFO_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +struct thread_info { + struct task_struct *task; + void *dump_exec_domain; + unsigned long flags; + int preempt_count; + unsigned long tp_value; + struct restart_block restart_block; + struct pt_regs *regs; + unsigned int cpu; +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .cpu = 0, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) + +#define thread_saved_fp(tsk) \ + ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8)) + +#define thread_saved_sp(tsk) \ + ((unsigned long)(tsk->thread.sp)) + +#define thread_saved_lr(tsk) \ + ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15)) + +static inline struct thread_info *current_thread_info(void) +{ + unsigned long sp; + + asm volatile("mov %0, sp\n":"=r"(sp)); + + return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); +} + +#endif /* !__ASSEMBLY__ */ + +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 
+#define TIF_UPROBE 3 /* uprobe breakpoint or singlestep */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */ +#define TIF_SYSCALL_AUDIT 6 /* syscall auditing */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ +#define TIF_SECCOMP 21 /* secure computing */ + +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) + +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + +#endif /* _ASM_CSKY_THREAD_INFO_H */ diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h new file mode 100644 index 0000000000..702861c688 --- /dev/null +++ b/arch/csky/include/asm/tlb.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TLB_H +#define __ASM_CSKY_TLB_H + +#include +#include + +#endif /* __ASM_CSKY_TLB_H */ diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h new file mode 100644 index 0000000000..407160b4fd --- /dev/null +++ b/arch/csky/include/asm/tlbflush.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +extern void flush_tlb_one(unsigned long vaddr); + +#endif diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h new file mode 100644 index 0000000000..732c4aaa2e --- /dev/null +++ b/arch/csky/include/asm/traps.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TRAPS_H +#define __ASM_CSKY_TRAPS_H + +#include + +#define VEC_RESET 0 +#define VEC_ALIGN 1 +#define VEC_ACCESS 2 +#define VEC_ZERODIV 3 +#define VEC_ILLEGAL 4 +#define VEC_PRIV 5 +#define VEC_TRACE 6 +#define VEC_BREAKPOINT 7 +#define VEC_UNRECOVER 8 +#define VEC_SOFTRESET 9 +#define VEC_AUTOVEC 10 +#define VEC_FAUTOVEC 11 +#define VEC_HWACCEL 12 + +#define VEC_TLBMISS 14 +#define VEC_TLBMODIFIED 15 + +#define VEC_TRAP0 16 +#define VEC_TRAP1 17 +#define VEC_TRAP2 18 +#define 
VEC_TRAP3 19 + +#define VEC_TLBINVALIDL 20 +#define VEC_TLBINVALIDS 21 + +#define VEC_PRFL 29 +#define VEC_FPE 30 + +extern void *vec_base[]; + +#define VEC_INIT(i, func) \ +do { \ + vec_base[i] = (void *)func; \ +} while (0) + +void csky_alignment(struct pt_regs *regs); + +asmlinkage void do_trap_unknown(struct pt_regs *regs); +asmlinkage void do_trap_zdiv(struct pt_regs *regs); +asmlinkage void do_trap_buserr(struct pt_regs *regs); +asmlinkage void do_trap_misaligned(struct pt_regs *regs); +asmlinkage void do_trap_bkpt(struct pt_regs *regs); +asmlinkage void do_trap_illinsn(struct pt_regs *regs); +asmlinkage void do_trap_fpe(struct pt_regs *regs); +asmlinkage void do_trap_priv(struct pt_regs *regs); +asmlinkage void trap_c(struct pt_regs *regs); + +asmlinkage void do_notify_resume(struct pt_regs *regs, + unsigned long thread_info_flags); + +void trap_init(void); + +#endif /* __ASM_CSKY_TRAPS_H */ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h new file mode 100644 index 0000000000..2e927c21d8 --- /dev/null +++ b/arch/csky/include/asm/uaccess.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_UACCESS_H +#define __ASM_CSKY_UACCESS_H + +/* + * __put_user_fn + */ +extern int __put_user_bad(void); + +#define __put_user_asm_b(x, ptr, err) \ +do { \ + int errcode; \ + __asm__ __volatile__( \ + "1: stb %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_h(x, ptr, err) \ +do { \ + int errcode; \ + __asm__ __volatile__( \ + "1: sth %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_w(x, ptr, err) \ +do { \ + int errcode; \ + __asm__ __volatile__( \ + "1: stw %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + \ + __asm__ __volatile__( \ + " ldw %3, (%1, 0) \n" \ + "1: stw %3, (%2, 0) \n" \ + " ldw %3, (%1, 4) \n" \ + "2: stw %3, (%2, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ + : "memory"); \ +} while (0) + +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + int retval = 0; + u32 tmp; + + switch (size) { + case 1: + tmp = *(u8 *)x; + __put_user_asm_b(tmp, ptr, retval); + break; + case 2: + tmp = *(u16 *)x; + __put_user_asm_h(tmp, ptr, retval); + break; + case 4: + tmp = *(u32 *)x; + __put_user_asm_w(tmp, ptr, retval); + break; + case 8: + __put_user_asm_64(x, (u64 *)ptr, retval); + break; + } + + return retval; +} +#define __put_user_fn __put_user_fn + +/* + * 
__get_user_fn + */ +extern int __get_user_bad(void); + +#define __get_user_asm_common(x, ptr, ins, err) \ +do { \ + int errcode; \ + __asm__ __volatile__( \ + "1: " ins " %1, (%4, 0) \n" \ + " br 3f \n" \ + "2: mov %0, %2 \n" \ + " movi %1, 0 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(errcode) \ + : "0"(0), "r"(ptr), "2"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __get_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + \ + __asm__ __volatile__( \ + "1: ldw %3, (%2, 0) \n" \ + " stw %3, (%1, 0) \n" \ + "2: ldw %3, (%2, 4) \n" \ + " stw %3, (%1, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ + : "memory"); \ +} while (0) + +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + int retval; + u32 tmp; + + switch (size) { + case 1: + __get_user_asm_common(tmp, ptr, "ldb", retval); + *(u8 *)x = (u8)tmp; + break; + case 2: + __get_user_asm_common(tmp, ptr, "ldh", retval); + *(u16 *)x = (u16)tmp; + break; + case 4: + __get_user_asm_common(tmp, ptr, "ldw", retval); + *(u32 *)x = (u32)tmp; + break; + case 8: + __get_user_asm_64(x, ptr, retval); + break; + } + + return retval; +} +#define __get_user_fn __get_user_fn + +unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n); +unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); + +unsigned long __clear_user(void __user *to, unsigned long n); +#define __clear_user __clear_user + +#include + +#endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h new file mode 100644 index 0000000000..9cf97de9a2 --- /dev/null +++ b/arch/csky/include/asm/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/csky/include/asm/uprobes.h b/arch/csky/include/asm/uprobes.h new file mode 100644 index 0000000000..600388eb93 --- /dev/null +++ b/arch/csky/include/asm/uprobes.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_UPROBES_H +#define __ASM_CSKY_UPROBES_H + +#include + +#define MAX_UINSN_BYTES 4 + +#define UPROBE_SWBP_INSN USR_BKPT +#define UPROBE_SWBP_INSN_SIZE 2 +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe_task { + unsigned long saved_trap_no; +}; + +struct arch_uprobe { + union { + u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; + }; + struct arch_probe_insn api; + unsigned long insn_size; + bool simulate; +}; + +int uprobe_breakpoint_handler(struct pt_regs *regs); +int uprobe_single_step_handler(struct pt_regs *regs); + +#endif /* __ASM_CSKY_UPROBES_H */ diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h new file mode 100644 index 0000000000..bdce581b5f --- /dev/null +++ b/arch/csky/include/asm/vdso.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_VDSO_H +#define __ASM_CSKY_VDSO_H + +#include + +#ifndef GENERIC_TIME_VSYSCALL +struct vdso_data { +}; +#endif + +/* + * The VDSO symbols are mapped into Linux so we can just use regular symbol + * addressing to get their offsets in userspace. 
The symbols are mapped at an + * offset of 0, but since the linker must support setting weak undefined + * symbols to the absolute address 0 it also happens to support other low + * addresses even when the code model suggests those low addresses would not + * otherwise be available. + */ +#define VDSO_SYMBOL(base, name) \ +({ \ + extern const char __vdso_##name[]; \ + (void __user *)((unsigned long)(base) + __vdso_##name); \ +}) + +#endif /* __ASM_CSKY_VDSO_H */ diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000..dfca7b4724 --- /dev/null +++ b/arch/csky/include/asm/vdso/clocksource.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H +#define __ASM_VDSO_CSKY_CLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_ARCHTIMER + +#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */ diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000..6c4f144694 --- /dev/null +++ b/arch/csky/include/asm/vdso/gettimeofday.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H +#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register struct timezone *tz asm("a1") = _tz; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_gettimeofday; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(tv), "r"(tz), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_gettime64; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct old_timespec32 *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_gettime; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_getres_time64; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct old_timespec32 *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_getres; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +uint64_t csky_pmu_read_cc(void); +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ 
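	/*
	 * Clarifying note (not in the upstream patch): the PMU v1 cycle
	 * counter read by csky_pmu_read_cc() is the only counter that
	 * userspace can read here, so it doubles as the vDSO clocksource;
	 * kernels without CONFIG_CSKY_PMU_V1 have no such counter and time
	 * is obtained through the trap-based fallback syscalls above.
	 */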
+#ifdef CONFIG_CSKY_PMU_V1 + return csky_pmu_read_cc(); +#else + return 0; +#endif +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */ diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h new file mode 100644 index 0000000000..39a6b561d0 --- /dev/null +++ b/arch/csky/include/asm/vdso/processor.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_VDSO_CSKY_PROCESSOR_H +#define __ASM_VDSO_CSKY_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#define cpu_relax() barrier() + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */ diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h new file mode 100644 index 0000000000..c276211a7c --- /dev/null +++ b/arch/csky/include/asm/vdso/vsyscall.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_VSYSCALL_H +#define __ASM_VDSO_CSKY_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include + +extern struct vdso_data *vdso_data; + +static __always_inline struct vdso_data *__csky_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __csky_get_k_vdso_data + +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */ diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h new file mode 100644 index 0000000000..43dca6336b --- /dev/null +++ b/arch/csky/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_CSKY_VMALLOC_H +#define _ASM_CSKY_VMALLOC_H + +#endif /* _ASM_CSKY_VMALLOC_H */ diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild new file mode 100644 index 0000000000..e784701419 --- /dev/null +++ b/arch/csky/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += ucontext.h diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h new file mode 100644 index 0000000000..1aedd513b6 --- /dev/null +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef __ASM_CSKY_BYTEORDER_H +#define __ASM_CSKY_BYTEORDER_H + +#include + +#endif /* __ASM_CSKY_BYTEORDER_H */ diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h new file mode 100644 index 0000000000..ed7fad1ea2 --- /dev/null +++ b/arch/csky/include/uapi/asm/cachectl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef __ASM_CSKY_CACHECTL_H +#define __ASM_CSKY_CACHECTL_H + +/* + * See "man cacheflush" + */ +#define ICACHE (1<<0) +#define DCACHE (1<<1) +#define BCACHE (ICACHE|DCACHE) + +#endif /* __ASM_CSKY_CACHECTL_H */ diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h new file mode 100644 index 0000000000..d0a8ac6a1b --- /dev/null +++ b/arch/csky/include/uapi/asm/perf_regs.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _ASM_CSKY_PERF_REGS_H +#define _ASM_CSKY_PERF_REGS_H + +/* Index of struct pt_regs */ +enum perf_event_csky_regs { + PERF_REG_CSKY_TLS, + PERF_REG_CSKY_LR, + PERF_REG_CSKY_PC, + PERF_REG_CSKY_SR, + PERF_REG_CSKY_SP, + PERF_REG_CSKY_ORIG_A0, + PERF_REG_CSKY_A0, + PERF_REG_CSKY_A1, + PERF_REG_CSKY_A2, + PERF_REG_CSKY_A3, + PERF_REG_CSKY_REGS0, + PERF_REG_CSKY_REGS1, + PERF_REG_CSKY_REGS2, + PERF_REG_CSKY_REGS3, + 
PERF_REG_CSKY_REGS4, + PERF_REG_CSKY_REGS5, + PERF_REG_CSKY_REGS6, + PERF_REG_CSKY_REGS7, + PERF_REG_CSKY_REGS8, + PERF_REG_CSKY_REGS9, +#if defined(__CSKYABIV2__) + PERF_REG_CSKY_EXREGS0, + PERF_REG_CSKY_EXREGS1, + PERF_REG_CSKY_EXREGS2, + PERF_REG_CSKY_EXREGS3, + PERF_REG_CSKY_EXREGS4, + PERF_REG_CSKY_EXREGS5, + PERF_REG_CSKY_EXREGS6, + PERF_REG_CSKY_EXREGS7, + PERF_REG_CSKY_EXREGS8, + PERF_REG_CSKY_EXREGS9, + PERF_REG_CSKY_EXREGS10, + PERF_REG_CSKY_EXREGS11, + PERF_REG_CSKY_EXREGS12, + PERF_REG_CSKY_EXREGS13, + PERF_REG_CSKY_EXREGS14, + PERF_REG_CSKY_HI, + PERF_REG_CSKY_LO, + PERF_REG_CSKY_DCSR, +#endif + PERF_REG_CSKY_MAX, +}; +#endif /* _ASM_CSKY_PERF_REGS_H */ diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h new file mode 100644 index 0000000000..3be9c14334 --- /dev/null +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _CSKY_PTRACE_H +#define _CSKY_PTRACE_H + +#ifndef __ASSEMBLY__ + +struct pt_regs { + unsigned long tls; + unsigned long lr; + unsigned long pc; + unsigned long sr; + unsigned long usp; + + /* + * a0, a1, a2, a3: + * abiv1: r2, r3, r4, r5 + * abiv2: r0, r1, r2, r3 + */ + unsigned long orig_a0; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + + /* + * ABIV2: r4 ~ r13 + * ABIV1: r6 ~ r14, r1 + */ + unsigned long regs[10]; + +#if defined(__CSKYABIV2__) + /* r16 ~ r30 */ + unsigned long exregs[15]; + + unsigned long rhi; + unsigned long rlo; + unsigned long dcsr; +#endif +}; + +struct user_fp { + unsigned long vr[96]; + unsigned long fcr; + unsigned long fesr; + unsigned long fid; + unsigned long reserved; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _CSKY_PTRACE_H */ diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h new file mode 100644 index 0000000000..859afb6024 --- /dev/null +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef __ASM_CSKY_SIGCONTEXT_H +#define __ASM_CSKY_SIGCONTEXT_H + +#include + +struct sigcontext { + struct pt_regs sc_pt_regs; + struct user_fp sc_user_fp; +}; + +#endif /* __ASM_CSKY_SIGCONTEXT_H */ diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h new file mode 100644 index 0000000000..7ff6a2466a --- /dev/null +++ b/arch/csky/include/uapi/asm/unistd.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS +#include + +#define __NR_set_thread_area (__NR_arch_specific_syscall + 0) +__SYSCALL(__NR_set_thread_area, sys_set_thread_area) +#define __NR_cacheflush (__NR_arch_specific_syscall + 1) +__SYSCALL(__NR_cacheflush, sys_cacheflush) diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile new file mode 100644 index 0000000000..8a868316b9 --- /dev/null +++ b/arch/csky/kernel/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +extra-y := vmlinux.lds + +obj-y += head.o entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/ +obj-y += power.o syscall.o syscall_table.o setup.o io.o +obj-y += process.o cpu-probe.o ptrace.o stacktrace.o +obj-y += probes/ + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o +obj-$(CONFIG_STACKTRACE) += 
stacktrace.o +obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o +obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +endif diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c new file mode 100644 index 0000000000..d1e9035794 --- /dev/null +++ b/arch/csky/kernel/asm-offsets.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include + +int main(void) +{ + /* offsets into the task struct */ + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + + /* offsets into the thread struct */ + DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp)); + DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); + DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); + DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); + + /* offsets into the thread_info struct */ + DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); + DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); + + /* offsets into the pt_regs */ + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); + DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + + DEFINE(PT_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_A3, offsetof(struct pt_regs, a3)); + DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_R15, offsetof(struct pt_regs, lr)); +#if defined(__CSKYABIV2__) + DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0])); + DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1])); + DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2])); + DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3])); + DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4])); + DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5])); + DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6])); + DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7])); + DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8])); + DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9])); + DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10])); + DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11])); + DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12])); + DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13])); + DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14])); + DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15])); + DEFINE(PT_RHI, 
offsetof(struct pt_regs, rhi));
+	DEFINE(PT_RLO,            offsetof(struct pt_regs, rlo));
+#endif
+	DEFINE(PT_USP,            offsetof(struct pt_regs, usp));
+	DEFINE(PT_FRAME_SIZE,     sizeof(struct pt_regs));
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
+						__softirq_pending));
+
+	/* signal defines */
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(SIGTRAP, SIGTRAP);
+
+	return 0;
+} diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S new file mode 100644 index 0000000000..e73e548f78 --- /dev/null +++ b/arch/csky/kernel/atomic.S @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+
+.text
+
+/*
+ * int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * If *ptr != oldval, return 1,
+ * else store newval to *ptr and return 0.
+ */
+ENTRY(csky_cmpxchg)
+	USPTOKSP
+
+	RD_MEH	a3
+	WR_MEH	a3
+
+	mfcr	a3, epc
+	addi	a3, TRAP0_SIZE
+
+	subi	sp, 16
+	stw	a3, (sp, 0)
+	mfcr	a3, epsr
+	stw	a3, (sp, 4)
+	mfcr	a3, usp
+	stw	a3, (sp, 8)
+
+	psrset	ee
+#ifdef CONFIG_CPU_HAS_LDSTEX
+1:
+	ldex	a3, (a2)
+	cmpne	a0, a3
+	bt16	2f
+	mov	a3, a1
+	stex	a3, (a2)
+	bez	a3, 1b
+2:
+	sync.is
+#else
+GLOBAL(csky_cmpxchg_ldw)
+	ldw	a3, (a2)
+	cmpne	a0, a3
+	bt16	3f
+GLOBAL(csky_cmpxchg_stw)
+	stw	a1, (a2)
+3:
+#endif
+	mvc	a0
+	ldw	a3, (sp, 0)
+	mtcr	a3, epc
+	ldw	a3, (sp, 4)
+	mtcr	a3, epsr
+	ldw	a3, (sp, 8)
+	mtcr	a3, usp
+	addi	sp, 16
+	KSPTOUSP
+	rte
+END(csky_cmpxchg) diff --git a/arch/csky/kernel/cpu-probe.c b/arch/csky/kernel/cpu-probe.c new file mode 100644 index 0000000000..5f15ca31d3 --- /dev/null +++ b/arch/csky/kernel/cpu-probe.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+
+#include <abi/reg_ops.h>
+
+static void percpu_print(void *arg)
+{
+	struct seq_file *m = (struct seq_file *)arg;
+	unsigned int cur, next, i;
+
+	seq_printf(m, "processor : %d\n", smp_processor_id());
+	seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME);
+
+	/* read processor id, max is 100 */
+	cur = mfcr("cr13");
+	for (i = 0; i < 100; i++) {
+		seq_printf(m, "product info[%d] : 0x%08x\n", i, cur);
+
+		next = mfcr("cr13");
+
+		/* some CPU only has one id reg */
+		if (cur == next)
+			break;
+
+		cur = next;
+
+		/* cpid index is 31-28, reset */
+		if (!(next >> 28)) {
+			while ((mfcr("cr13") >> 28) != i);
+			break;
+		}
+	}
+
+	/* CPU feature regs, setup by bootloader or gdbinit */
+	seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint());
+	seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18"));
+	seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2());
+	seq_printf(m, "\n");
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, percpu_print, m, true);
+
+#ifdef CSKY_ARCH_VERSION
+	seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION);
+	seq_printf(m, "\n");
+#endif
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < 1 ?
(void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) {} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show, +}; diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S new file mode 100644 index 0000000000..c68cdcc76d --- /dev/null +++ b/arch/csky/kernel/entry.S @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +.macro zero_fp +#ifdef CONFIG_STACKTRACE + movi r8, 0 +#endif +.endm + +.macro context_tracking +#ifdef CONFIG_CONTEXT_TRACKING_USER + mfcr a0, epsr + btsti a0, 31 + bt 1f + jbsr user_exit_callable + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV1__) + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) +#endif +1: +#endif +.endm + +.text +ENTRY(csky_pagefault) + SAVE_ALL 0 + zero_fp + context_tracking + psrset ee + mov a0, sp + jbsr do_page_fault + jmpi ret_from_exception + +ENTRY(csky_systemcall) + SAVE_ALL TRAP0_SIZE + zero_fp + context_tracking + psrset ee, ie + + lrw r9, __NR_syscalls + cmphs syscallid, r9 /* Check nr of syscall */ + bt ret_from_exception + + lrw r9, sys_call_table + ixw r9, syscallid + ldw syscallid, (r9) + cmpnei syscallid, 0 + bf ret_from_exception + + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bt csky_syscall_trace +#if defined(__CSKYABIV2__) + subi sp, 8 + stw r5, (sp, 0x4) + stw r4, (sp, 0x0) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + jsr syscallid +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ + jmpi ret_from_exception + +csky_syscall_trace: + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_enter + cmpnei a0, 0 + bt 1f + /* Prepare args before do system call */ + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV2__) + subi sp, 8 + ldw r9, (sp, LSAVE_A4) + stw r9, (sp, 0x0) + ldw r9, (sp, LSAVE_A5) + stw r9, (sp, 0x4) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) + jsr syscallid /* Do system call */ +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ + +1: + mov a0, sp /* right now, sp --> pt_regs */ + jbsr syscall_trace_exit + br ret_from_exception + +ENTRY(ret_from_kernel_thread) + jbsr schedule_tail + mov a0, r10 + jsr r9 + jbsr ret_from_exception + +ENTRY(ret_from_fork) + jbsr schedule_tail + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bf ret_from_exception + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_exit + +ret_from_exception: + psrclr ie + ld r9, (sp, LSAVE_PSR) + btsti r9, 31 + + bt 1f + /* + * Load address of current->thread_info, Then get address of task_struct + * Get task_needreshed in task_struct + */ + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_WORK_MASK + and r10, r9 + cmpnei r10, 0 + bt exit_work +#ifdef CONFIG_CONTEXT_TRACKING_USER + jbsr user_enter_callable +#endif +1: +#ifdef CONFIG_PREEMPTION + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, 
TINFO_PREEMPT)
+	cmpnei	r10, 0
+	bt	2f
+	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
+2:
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	ld	r10, (sp, LSAVE_PSR)
+	btsti	r10, 6
+	bf	2f
+	jbsr	trace_hardirqs_on
+2:
+#endif
+	RESTORE_ALL
+
+exit_work:
+	lrw	r9, ret_from_exception
+	mov	lr, r9
+
+	btsti	r10, TIF_NEED_RESCHED
+	bt	work_resched
+
+	psrset	ie
+	mov	a0, sp
+	mov	a1, r10
+	jmpi	do_notify_resume
+
+work_resched:
+	jmpi	schedule
+
+ENTRY(csky_trap)
+	SAVE_ALL 0
+	zero_fp
+	context_tracking
+	psrset	ee
+	mov	a0, sp			/* Push Stack pointer arg */
+	jbsr	trap_c			/* Call C-level trap handler */
+	jmpi	ret_from_exception
+
+/*
+ * Prototype from libc for abiv1:
+ * register unsigned int __result asm("a0");
+ * asm( "trap 3" :"=r"(__result)::);
+ */
+ENTRY(csky_get_tls)
+	USPTOKSP
+
+	RD_MEH	a0
+	WR_MEH	a0
+
+	/* increase epc for continue */
+	mfcr	a0, epc
+	addi	a0, TRAP0_SIZE
+	mtcr	a0, epc
+
+	/* get current task thread_info with kernel 8K stack */
+	bmaski	a0, THREAD_SHIFT
+	not	a0
+	subi	sp, 1
+	and	a0, sp
+	addi	sp, 1
+
+	/* get tls */
+	ldw	a0, (a0, TINFO_TP_VALUE)
+
+	KSPTOUSP
+	rte
+
+ENTRY(csky_irq)
+	SAVE_ALL 0
+	zero_fp
+	context_tracking
+	psrset	ee
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	jbsr	trace_hardirqs_off
+#endif
+
+
+	mov	a0, sp
+	jbsr	generic_handle_arch_irq
+
+	jmpi	ret_from_exception
+
+/*
+ * a0 = prev task_struct *
+ * a1 = next task_struct *
+ * a0 = return next
+ */
+ENTRY(__switch_to)
+	lrw	a3, TASK_THREAD
+	addu	a3, a0
+
+	SAVE_SWITCH_STACK
+
+	stw	sp, (a3, THREAD_KSP)
+
+	/* Set up next process to run */
+	lrw	a3, TASK_THREAD
+	addu	a3, a1
+
+	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */
+
+#if defined(__CSKYABIV2__)
+	addi	a3, a1, TASK_THREAD_INFO
+	ldw	tls, (a3, TINFO_TP_VALUE)
+#endif
+
+	RESTORE_SWITCH_STACK
+
+	rts
+ENDPROC(__switch_to) diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c new file mode 100644 index 0000000000..50bfcf1290 --- /dev/null +++ b/arch/csky/kernel/ftrace.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define NOP		0x4000
+#define NOP32_HI	0xc400
+#define NOP32_LO	0x4820
+#define PUSH_LR		0x14d0
+#define MOVIH_LINK	0xea3a
+#define ORI_LINK	0xef5a
+#define JSR_LINK	0xe8fa
+#define BSR_LINK	0xe000
+
+/*
+ * Gcc-csky with -pg will insert a stub in the function prologue:
+ *	push	lr
+ *	jbsr	_mcount
+ *	nop32
+ *	nop32
+ *
+ * If (callee - current_pc) is less than 64MB, we'll use bsr:
+ *	push	lr
+ *	bsr	_mcount
+ *	nop32
+ *	nop32
+ * else we'll use (movih + ori + jsr):
+ *	push	lr
+ *	movih	r26, ...
+ *	ori	r26, ...
+ *	jsr	r26
+ *
+ *	(r26 is our reserved link-reg)
+ *
+ */
+static inline void make_jbsr(unsigned long callee, unsigned long pc,
+			     uint16_t *call, bool nolr)
+{
+	long offset;
+
+	call[0] = nolr ?
NOP : PUSH_LR; + + offset = (long) callee - (long) pc; + + if (unlikely(offset < -67108864 || offset > 67108864)) { + call[1] = MOVIH_LINK; + call[2] = callee >> 16; + call[3] = ORI_LINK; + call[4] = callee & 0xffff; + call[5] = JSR_LINK; + call[6] = 0; + } else { + offset = offset >> 1; + + call[1] = BSR_LINK | + ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); + call[2] = (uint16_t)((unsigned long) offset & 0xffff); + call[3] = call[5] = NOP32_HI; + call[4] = call[6] = NOP32_LO; + } +} + +static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO, + NOP32_HI, NOP32_LO}; +static int ftrace_check_current_nop(unsigned long hook) +{ + uint16_t olds[7]; + unsigned long hook_pos = hook - 2; + + if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos, + sizeof(nops))) + return -EFAULT; + + if (memcmp((void *)nops, (void *)olds, sizeof(nops))) { + pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n", + (void *)hook_pos, + olds[0], olds[1], olds[2], olds[3], olds[4], olds[5], + olds[6]); + + return -EINVAL; + } + + return 0; +} + +static int ftrace_modify_code(unsigned long hook, unsigned long target, + bool enable, bool nolr) +{ + uint16_t call[7]; + + unsigned long hook_pos = hook - 2; + int ret = 0; + + make_jbsr(target, hook, call, nolr); + + ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops, + sizeof(nops)); + if (ret) + return -EPERM; + + flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE); + + return 0; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + int ret = ftrace_check_current_nop(rec->ip); + + if (ret) + return ret; + + return ftrace_modify_code(rec->ip, addr, true, false); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, false, false); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + int ret = ftrace_modify_code((unsigned long)&ftrace_call, + (unsigned long)func, true, true); + if (!ret) + ret = ftrace_modify_code((unsigned long)&ftrace_regs_call, + (unsigned long)func, true, true); + return ret; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, true, true); +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + if (!function_graph_enter(old, self_addr, + *(unsigned long *)frame_pointer, parent)) { + /* + * For csky-gcc function has sub-call: + * subi sp, sp, 8 + * stw r8, (sp, 0) + * mov r8, sp + * st.w r15, (sp, 0x4) + * push r15 + * jl _mcount + * We only need set *parent for resume + * + * For csky-gcc function has no sub-call: + * subi sp, sp, 4 + * stw r8, (sp, 0) + * mov r8, sp + * push r15 + * jl _mcount + * We need set *parent and *(frame_pointer + 4) for resume, + * because lr is resumed twice. 
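	 *
	 * In other words: in the second layout the return address is held
	 * both in *parent and in the copy at (frame_pointer + 4), so both
	 * locations are redirected to return_hooker below, the on-stack
	 * copy only if it still holds the original address.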
+ */ + *parent = return_hooker; + frame_pointer += 4; + if (*(unsigned long *)frame_pointer == old) + *(unsigned long *)frame_pointer = return_hooker; + } +} + +#ifdef CONFIG_DYNAMIC_FTRACE +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, true, true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, false, true); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_DYNAMIC_FTRACE +#ifndef CONFIG_CPU_HAS_ICACHE_INS +struct ftrace_modify_param { + int command; + atomic_t cpu_count; +}; + +static int __ftrace_modify_code(void *data) +{ + struct ftrace_modify_param *param = data; + + if (atomic_inc_return(¶m->cpu_count) == 1) { + ftrace_modify_all_code(param->command); + atomic_inc(¶m->cpu_count); + } else { + while (atomic_read(¶m->cpu_count) <= num_online_cpus()) + cpu_relax(); + local_icache_inv_all(NULL); + } + + return 0; +} + +void arch_ftrace_update_code(int command) +{ + struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; + + stop_machine(__ftrace_modify_code, ¶m, cpu_online_mask); +} +#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* _mcount is defined in abi's mcount.S */ +EXPORT_SYMBOL(_mcount); diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S new file mode 100644 index 0000000000..7e3e4f15b0 --- /dev/null +++ b/arch/csky/kernel/head.S @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include + +__HEAD +ENTRY(_start) + SETUP_MMU + + /* set stack point */ + lrw r6, init_thread_union + THREAD_SIZE + mov sp, r6 + + jmpi csky_start +END(_start) + +#ifdef CONFIG_SMP +.align 10 +ENTRY(_start_smp_secondary) + SETUP_MMU + +#ifdef CONFIG_PAGE_OFFSET_80000000 + lrw r6, secondary_msa1 + ld.w r6, (r6, 0) + mtcr r6, cr<31, 15> +#endif + + lrw r6, secondary_pgd + ld.w r6, (r6, 0) + mtcr r6, cr<28, 15> + mtcr r6, cr<29, 15> + + /* set stack point */ + lrw r6, secondary_stack + ld.w r6, (r6, 0) + mov sp, r6 + + jmpi csky_start_secondary +END(_start_smp_secondary) +#endif diff --git a/arch/csky/kernel/io.c b/arch/csky/kernel/io.c new file mode 100644 index 0000000000..5883f13fa2 --- /dev/null +++ b/arch/csky/kernel/io.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +/* + * Copy data from IO memory space to "real" memory space. + */ +void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)from, 4)) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } + + while (count >= 4) { + *(u32 *)to = __raw_readl(from); + from += 4; + to += 4; + count -= 4; + } + + while (count) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. + */ +void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)to, 4)) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } + + while (count >= 4) { + __raw_writel(*(u32 *)from, to); + from += 4; + to += 4; + count -= 4; + } + + while (count) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_toio); + +/* + * "memset" on IO memory space. 
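 *
 * The fill byte is first replicated into all four byte lanes of a
 * 32-bit word (c = 0xab gives qc = 0xabababab), so the aligned middle
 * of the area can be written with word stores, leaving byte stores
 * only for the unaligned head and tail.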
+ */ +void __memset_io(volatile void __iomem *dst, int c, size_t count) +{ + u32 qc = (u8)c; + + qc |= qc << 8; + qc |= qc << 16; + + while (count && !IS_ALIGNED((unsigned long)dst, 4)) { + __raw_writeb(c, dst); + dst++; + count--; + } + + while (count >= 4) { + __raw_writel(qc, dst); + dst += 4; + count -= 4; + } + + while (count) { + __raw_writeb(c, dst); + dst++; + count--; + } +} +EXPORT_SYMBOL(__memset_io); diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c new file mode 100644 index 0000000000..fcdaf31562 --- /dev/null +++ b/arch/csky/kernel/irq.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include +#include + +void __init init_IRQ(void) +{ + irqchip_init(); +#ifdef CONFIG_SMP + setup_smp_ipi(); +#endif +} diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c new file mode 100644 index 0000000000..d0e8b21447 --- /dev/null +++ b/arch/csky/kernel/jump_label.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include + +#define NOP32_HI 0xc400 +#define NOP32_LO 0x4820 +#define BSR_LINK 0xe000 + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + unsigned long addr = jump_entry_code(entry); + u16 insn[2]; + int ret = 0; + + if (type == JUMP_LABEL_JMP) { + long offset = jump_entry_target(entry) - jump_entry_code(entry); + + if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864)) + return; + + offset = offset >> 1; + + insn[0] = BSR_LINK | + ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); + insn[1] = (uint16_t)((unsigned long) offset & 0xffff); + } else { + insn[0] = NOP32_HI; + insn[1] = NOP32_LO; + } + + ret = copy_to_kernel_nofault((void *)addr, insn, 4); + WARN_ON(ret); + + flush_icache_range(addr, addr + 4); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * We use the same instructions in the arch_static_branch and + * arch_static_branch_jump inline functions, so there's no + * need to patch them up here. + * The core will call arch_jump_label_transform when those + * instructions need to be replaced. + */ + arch_jump_label_transform(entry, type); +} diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c new file mode 100644 index 0000000000..0b56a8cd12 --- /dev/null +++ b/arch/csky/kernel/module.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CPU_CK810 +#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) +#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) + +#define CHANGE_JSRI_TO_LRW(addr) do { \ + *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \ + *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \ +} while (0) + +#define SET_JSR32_R26(addr) do { \ + *(uint16_t *)(addr) = 0xE8Fa; \ + *((uint16_t *)(addr) + 1) = 0x0000; \ +} while (0) + +static void jsri_2_lrw_jsr(uint32_t *location) +{ + uint16_t *location_tmp = (uint16_t *)location; + + if (IS_BSR32(*location_tmp, *(location_tmp + 1))) + return; + + if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { + /* jsri 0x... --> lrw r26, 0x... 
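		 * (lrw loads the instruction's literal target into r26,
		 * the reserved link register, and SET_JSR32_R26 then turns
		 * the following word into "jsr r26", so the call is made
		 * through the register instead of with a direct jsri)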
*/ + CHANGE_JSRI_TO_LRW(location); + /* lsli r0, r0 --> jsr r26 */ + SET_JSR32_R26(location + 1); + } +} +#else +static inline void jsri_2_lrw_jsr(uint32_t *location) +{ + return; +} +#endif + +int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + short *temp; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_CSKY_32: + /* We add the value into the location given */ + *location = rel[i].r_addend + sym->st_value; + break; + case R_CSKY_PC32: + /* Add the value, subtract its position */ + *location = rel[i].r_addend + sym->st_value + - (uint32_t)location; + break; + case R_CSKY_PCRELJSR_IMM11BY2: + break; + case R_CSKY_PCRELJSR_IMM26BY2: + jsri_2_lrw_jsr(location); + break; + case R_CSKY_ADDR_HI16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) >> 16); + break; + case R_CSKY_ADDR_LO16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) & 0xffff); + break; + default: + pr_err("module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c new file mode 100644 index 0000000000..1612f43540 --- /dev/null +++ b/arch/csky/kernel/perf_callchain.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include + +/* Kernel callchain */ +struct stackframe { + unsigned long fp; + unsigned long lr; +}; + +static int unwind_frame_kernel(struct stackframe *frame) +{ + unsigned long low = (unsigned long)task_stack_page(current); + unsigned long high = low + THREAD_SIZE; + + if (unlikely(frame->fp < low || frame->fp > high)) + return -EPERM; + + if (kstack_end((void *)frame->fp) || frame->fp & 0x3) + return -EPERM; + + *frame = *(struct stackframe *)frame->fp; + + if (__kernel_text_address(frame->lr)) { + int graph = 0; + + frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr, + NULL); + } + return 0; +} + +static void notrace walk_stackframe(struct stackframe *fr, + struct perf_callchain_entry_ctx *entry) +{ + do { + perf_callchain_store(entry, fr->lr); + } while (unwind_frame_kernel(fr) >= 0); +} + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. 
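 *
 * Expected layout of one user frame (only present when the code was
 * built with -mbacktrace, see the comment before perf_callchain_user
 * below), matching struct stackframe:
 *
 *	fp -> { previous fp, lr }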
+ */ +static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + unsigned long fp, unsigned long reg_lr) +{ + struct stackframe buftail; + unsigned long lr = 0; + unsigned long __user *user_frame_tail = (unsigned long __user *)fp; + + /* Check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + if (__copy_from_user_inatomic(&buftail, user_frame_tail, + sizeof(buftail))) + return 0; + + if (reg_lr != 0) + lr = reg_lr; + else + lr = buftail.lr; + + fp = buftail.fp; + perf_callchain_store(entry, lr); + + return fp; +} + +/* + * This will be called when the target is in user mode + * This function will only be called when we use + * "PERF_SAMPLE_CALLCHAIN" in + * kernel/events/core.c:perf_prepare_sample() + * + * How to trigger perf_callchain_[user/kernel] : + * $ perf record -e cpu-clock --call-graph fp ./program + * $ perf report --call-graph + * + * On C-SKY platform, the program being sampled and the C library + * need to be compiled with * -mbacktrace, otherwise the user + * stack will not contain function frame. + */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long fp = 0; + + fp = regs->regs[4]; + perf_callchain_store(entry, regs->pc); + + /* + * While backtrace from leaf function, lr is normally + * not saved inside frame on C-SKY, so get lr from pt_regs + * at the sample point. However, lr value can be incorrect if + * lr is used as temp register + */ + fp = user_backtrace(entry, fp, regs->lr); + + while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + fp = user_backtrace(entry, fp, 0); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct stackframe fr; + + fr.fp = regs->regs[4]; + fr.lr = regs->lr; + walk_stackframe(&fr, entry); +} diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c new file mode 100644 index 0000000000..e5f18420ce --- /dev/null +++ b/arch/csky/kernel/perf_event.c @@ -0,0 +1,1371 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include +#include + +#define CSKY_PMU_MAX_EVENTS 32 +#define DEFAULT_COUNT_WIDTH 48 + +#define HPCR "<0, 0x0>" /* PMU Control reg */ +#define HPSPR "<0, 0x1>" /* Start PC reg */ +#define HPEPR "<0, 0x2>" /* End PC reg */ +#define HPSIR "<0, 0x3>" /* Soft Counter reg */ +#define HPCNTENR "<0, 0x4>" /* Count Enable reg */ +#define HPINTENR "<0, 0x5>" /* Interrupt Enable reg */ +#define HPOFSR "<0, 0x6>" /* Interrupt Status reg */ + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[CSKY_PMU_MAX_EVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. 
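	 *
	 * Illustration (an assumption based on csky_pmu_hw_map and the
	 * PERF_TYPE_RAW handling below, which uses the raw config as the
	 * counter index directly): index 0x1 is the cycle counter and 0x2
	 * the instruction counter, so "perf stat -e r1" should count
	 * cycles.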
+ */ + unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)]; +}; + +static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void); +static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); + +static struct csky_pmu_t { + struct pmu pmu; + struct pmu_hw_events __percpu *hw_events; + struct platform_device *plat_device; + uint32_t count_width; + uint32_t hpcr; + u64 max_period; +} csky_pmu; +static int csky_pmu_irq; + +#define to_csky_pmu(p) (container_of(p, struct csky_pmu, pmu)) + +#define cprgr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprgr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwgr(reg, val) \ +({ \ + asm volatile( \ + "cpwgr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile( \ + "cpwcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +/* cycle counter */ +uint64_t csky_pmu_read_cc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x3>"); + lo = cprgr("<0, 0x2>"); + hi = cprgr("<0, 0x3>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cc(uint64_t val) +{ + cpwgr("<0, 0x2>", (uint32_t) val); + cpwgr("<0, 0x3>", (uint32_t) (val >> 32)); +} + +/* instruction counter */ +static uint64_t csky_pmu_read_ic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x5>"); + lo = cprgr("<0, 0x4>"); + hi = cprgr("<0, 0x5>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ic(uint64_t val) +{ + cpwgr("<0, 0x4>", (uint32_t) val); + cpwgr("<0, 0x5>", (uint32_t) (val >> 32)); +} + +/* l1 icache access counter */ +static uint64_t csky_pmu_read_icac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x7>"); + lo = cprgr("<0, 0x6>"); + hi = cprgr("<0, 0x7>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icac(uint64_t val) +{ + cpwgr("<0, 0x6>", (uint32_t) val); + cpwgr("<0, 0x7>", (uint32_t) (val >> 32)); +} + +/* l1 icache miss counter */ +static uint64_t csky_pmu_read_icmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x9>"); + lo = cprgr("<0, 0x8>"); + hi = cprgr("<0, 0x9>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icmc(uint64_t val) +{ + cpwgr("<0, 0x8>", (uint32_t) val); + cpwgr("<0, 0x9>", (uint32_t) (val >> 32)); +} + +/* l1 dcache access counter */ +static uint64_t csky_pmu_read_dcac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xb>"); + lo = cprgr("<0, 0xa>"); + hi = cprgr("<0, 0xb>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcac(uint64_t val) +{ + cpwgr("<0, 0xa>", (uint32_t) val); + cpwgr("<0, 0xb>", (uint32_t) (val >> 32)); +} + +/* l1 dcache miss counter */ +static uint64_t csky_pmu_read_dcmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xd>"); + lo = cprgr("<0, 0xc>"); + hi = cprgr("<0, 0xd>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void 
csky_pmu_write_dcmc(uint64_t val) +{ + cpwgr("<0, 0xc>", (uint32_t) val); + cpwgr("<0, 0xd>", (uint32_t) (val >> 32)); +} + +/* l2 cache access counter */ +static uint64_t csky_pmu_read_l2ac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xf>"); + lo = cprgr("<0, 0xe>"); + hi = cprgr("<0, 0xf>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2ac(uint64_t val) +{ + cpwgr("<0, 0xe>", (uint32_t) val); + cpwgr("<0, 0xf>", (uint32_t) (val >> 32)); +} + +/* l2 cache miss counter */ +static uint64_t csky_pmu_read_l2mc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x11>"); + lo = cprgr("<0, 0x10>"); + hi = cprgr("<0, 0x11>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2mc(uint64_t val) +{ + cpwgr("<0, 0x10>", (uint32_t) val); + cpwgr("<0, 0x11>", (uint32_t) (val >> 32)); +} + +/* I-UTLB miss counter */ +static uint64_t csky_pmu_read_iutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x15>"); + lo = cprgr("<0, 0x14>"); + hi = cprgr("<0, 0x15>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_iutlbmc(uint64_t val) +{ + cpwgr("<0, 0x14>", (uint32_t) val); + cpwgr("<0, 0x15>", (uint32_t) (val >> 32)); +} + +/* D-UTLB miss counter */ +static uint64_t csky_pmu_read_dutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x17>"); + lo = cprgr("<0, 0x16>"); + hi = cprgr("<0, 0x17>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dutlbmc(uint64_t val) +{ + cpwgr("<0, 0x16>", (uint32_t) val); + cpwgr("<0, 0x17>", (uint32_t) (val >> 32)); +} + +/* JTLB miss counter */ +static uint64_t csky_pmu_read_jtlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x19>"); + lo = cprgr("<0, 0x18>"); + hi = cprgr("<0, 0x19>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_jtlbmc(uint64_t val) +{ + cpwgr("<0, 0x18>", (uint32_t) val); + cpwgr("<0, 0x19>", (uint32_t) (val >> 32)); +} + +/* software counter */ +static uint64_t csky_pmu_read_softc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1b>"); + lo = cprgr("<0, 0x1a>"); + hi = cprgr("<0, 0x1b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_softc(uint64_t val) +{ + cpwgr("<0, 0x1a>", (uint32_t) val); + cpwgr("<0, 0x1b>", (uint32_t) (val >> 32)); +} + +/* conditional branch mispredict counter */ +static uint64_t csky_pmu_read_cbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1d>"); + lo = cprgr("<0, 0x1c>"); + hi = cprgr("<0, 0x1d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbmc(uint64_t val) +{ + cpwgr("<0, 0x1c>", (uint32_t) val); + cpwgr("<0, 0x1d>", (uint32_t) (val >> 32)); +} + +/* conditional branch instruction counter */ +static uint64_t csky_pmu_read_cbic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1f>"); + lo = cprgr("<0, 0x1e>"); + hi = cprgr("<0, 0x1f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + 
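	/*
	 * hi was re-read until it was stable around the lo read (the
	 * do/while above), so a carry between the two 32-bit halves
	 * cannot be missed when they are combined here.
	 */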
result |= lo; + + return result; +} + +static void csky_pmu_write_cbic(uint64_t val) +{ + cpwgr("<0, 0x1e>", (uint32_t) val); + cpwgr("<0, 0x1f>", (uint32_t) (val >> 32)); +} + +/* indirect branch mispredict counter */ +static uint64_t csky_pmu_read_ibmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x21>"); + lo = cprgr("<0, 0x20>"); + hi = cprgr("<0, 0x21>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibmc(uint64_t val) +{ + cpwgr("<0, 0x20>", (uint32_t) val); + cpwgr("<0, 0x21>", (uint32_t) (val >> 32)); +} + +/* indirect branch instruction counter */ +static uint64_t csky_pmu_read_ibic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x23>"); + lo = cprgr("<0, 0x22>"); + hi = cprgr("<0, 0x23>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibic(uint64_t val) +{ + cpwgr("<0, 0x22>", (uint32_t) val); + cpwgr("<0, 0x23>", (uint32_t) (val >> 32)); +} + +/* LSU spec fail counter */ +static uint64_t csky_pmu_read_lsfc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x25>"); + lo = cprgr("<0, 0x24>"); + hi = cprgr("<0, 0x25>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_lsfc(uint64_t val) +{ + cpwgr("<0, 0x24>", (uint32_t) val); + cpwgr("<0, 0x25>", (uint32_t) (val >> 32)); +} + +/* store instruction counter */ +static uint64_t csky_pmu_read_sic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x27>"); + lo = cprgr("<0, 0x26>"); + hi = cprgr("<0, 0x27>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_sic(uint64_t val) +{ + cpwgr("<0, 0x26>", (uint32_t) val); + cpwgr("<0, 0x27>", (uint32_t) (val >> 32)); +} + +/* dcache read access counter */ +static uint64_t csky_pmu_read_dcrac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x29>"); + lo = cprgr("<0, 0x28>"); + hi = cprgr("<0, 0x29>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrac(uint64_t val) +{ + cpwgr("<0, 0x28>", (uint32_t) val); + cpwgr("<0, 0x29>", (uint32_t) (val >> 32)); +} + +/* dcache read miss counter */ +static uint64_t csky_pmu_read_dcrmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2b>"); + lo = cprgr("<0, 0x2a>"); + hi = cprgr("<0, 0x2b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrmc(uint64_t val) +{ + cpwgr("<0, 0x2a>", (uint32_t) val); + cpwgr("<0, 0x2b>", (uint32_t) (val >> 32)); +} + +/* dcache write access counter */ +static uint64_t csky_pmu_read_dcwac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2d>"); + lo = cprgr("<0, 0x2c>"); + hi = cprgr("<0, 0x2d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwac(uint64_t val) +{ + cpwgr("<0, 0x2c>", (uint32_t) val); + cpwgr("<0, 0x2d>", (uint32_t) (val >> 32)); +} + +/* dcache write miss counter */ +static uint64_t csky_pmu_read_dcwmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2f>"); + lo = cprgr("<0, 0x2e>"); + hi = cprgr("<0, 
0x2f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwmc(uint64_t val) +{ + cpwgr("<0, 0x2e>", (uint32_t) val); + cpwgr("<0, 0x2f>", (uint32_t) (val >> 32)); +} + +/* l2cache read access counter */ +static uint64_t csky_pmu_read_l2rac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x31>"); + lo = cprgr("<0, 0x30>"); + hi = cprgr("<0, 0x31>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rac(uint64_t val) +{ + cpwgr("<0, 0x30>", (uint32_t) val); + cpwgr("<0, 0x31>", (uint32_t) (val >> 32)); +} + +/* l2cache read miss counter */ +static uint64_t csky_pmu_read_l2rmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x33>"); + lo = cprgr("<0, 0x32>"); + hi = cprgr("<0, 0x33>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rmc(uint64_t val) +{ + cpwgr("<0, 0x32>", (uint32_t) val); + cpwgr("<0, 0x33>", (uint32_t) (val >> 32)); +} + +/* l2cache write access counter */ +static uint64_t csky_pmu_read_l2wac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x35>"); + lo = cprgr("<0, 0x34>"); + hi = cprgr("<0, 0x35>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wac(uint64_t val) +{ + cpwgr("<0, 0x34>", (uint32_t) val); + cpwgr("<0, 0x35>", (uint32_t) (val >> 32)); +} + +/* l2cache write miss counter */ +static uint64_t csky_pmu_read_l2wmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x37>"); + lo = cprgr("<0, 0x36>"); + hi = cprgr("<0, 0x37>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wmc(uint64_t val) +{ + cpwgr("<0, 0x36>", (uint32_t) val); + cpwgr("<0, 0x37>", (uint32_t) (val >> 32)); +} + +#define HW_OP_UNSUPPORTED 0xffff +static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x1, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x2, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf, + [PERF_COUNT_HW_BRANCH_MISSES] = 0xe, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED, +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xffff +static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x5, + [C(RESULT_MISS)] = 0x6, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0x15, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0x17, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0x4, + }, + 
[C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x7, + [C(RESULT_MISS)] = 0x8, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x18, + [C(RESULT_MISS)] = 0x19, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x1a, + [C(RESULT_MISS)] = 0x1b, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(DTLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0xb, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0xb, + }, +#endif + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0xa, + }, +#endif + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +int csky_pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)csky_pmu.max_period) + left = csky_pmu.max_period; + + /* + * The hw event starts counting from this event offset, + * mark it to be able to extract future "deltas": + */ + local64_set(&hwc->prev_count, (u64)(-left)); + + if (hw_raw_write_mapping[hwc->idx] != NULL) + hw_raw_write_mapping[hwc->idx]((u64)(-left) & + csky_pmu.max_period); + + cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR)); + + 
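	/*
	 * The HPOFSR write above drops this counter's stale overflow
	 * flag, so a leftover overflow is not reported as soon as the
	 * counter restarts.
	 */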
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+static void csky_perf_event_update(struct perf_event *event,
+				   struct hw_perf_event *hwc)
+{
+	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
+	/*
+	 * Sign extend count value to 64bit, otherwise delta calculation
+	 * would be incorrect when overflow occurs.
+	 */
+	uint64_t new_raw_count = sign_extend64(
+		hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1);
+	int64_t delta = new_raw_count - prev_raw_count;
+
+	/*
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
+	 * because there's no way for us to re-enter this function anytime.
+	 */
+	local64_set(&hwc->prev_count, new_raw_count);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+}
+
+static void csky_pmu_reset(void *info)
+{
+	cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1));
+}
+
+static void csky_pmu_read(struct perf_event *event)
+{
+	csky_perf_event_update(event, &event->hw);
+}
+
+static int csky_pmu_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+
+	cache_type = (config >> 0) & 0xff;
+	cache_op = (config >> 8) & 0xff;
+	cache_result = (config >> 16) & 0xff;
+
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	return csky_pmu_cache_map[cache_type][cache_op][cache_result];
+}
+
+static int csky_pmu_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int ret;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		if (event->attr.config >= PERF_COUNT_HW_MAX)
+			return -ENOENT;
+		ret = csky_pmu_hw_map[event->attr.config];
+		if (ret == HW_OP_UNSUPPORTED)
+			return -ENOENT;
+		hwc->idx = ret;
+		break;
+	case PERF_TYPE_HW_CACHE:
+		ret = csky_pmu_cache_event(event->attr.config);
+		if (ret < 0 || ret == CACHE_OP_UNSUPPORTED)
+			return -ENOENT;
+		hwc->idx = ret;
+		break;
+	case PERF_TYPE_RAW:
+		if (event->attr.config >= CSKY_PMU_MAX_EVENTS)
+			return -ENOENT;
+		if (hw_raw_read_mapping[event->attr.config] == NULL)
+			return -ENOENT;
+		hwc->idx = event->attr.config;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (event->attr.exclude_user)
+		csky_pmu.hpcr = BIT(2);
+	else if (event->attr.exclude_kernel)
+		csky_pmu.hpcr = BIT(3);
+	else
+		csky_pmu.hpcr = BIT(2) | BIT(3);
+
+	csky_pmu.hpcr |= BIT(1) | BIT(0);
+
+	return 0;
+}
+
+/* starts all counters */
+static void csky_pmu_enable(struct pmu *pmu)
+{
+	cpwcr(HPCR, csky_pmu.hpcr);
+}
+
+/* stops all counters */
+static void csky_pmu_disable(struct pmu *pmu)
+{
+	cpwcr(HPCR, BIT(1));
+}
+
+static void csky_pmu_start(struct perf_event *event, int flags)
+{
+	unsigned long flg;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	csky_pmu_event_set_period(event);
+
+	local_irq_save(flg);
+
+	cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR));
+	cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR));
+
+	local_irq_restore(flg);
+}
+
+static void csky_pmu_stop_event(struct perf_event *event)
+{
+	unsigned long flg;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	local_irq_save(flg);
+
+	cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR));
+	cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR));
+
+	local_irq_restore(flg);
+}
+
+static void csky_pmu_stop(struct perf_event *event, int flags)
+{
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		csky_pmu_stop_event(event);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
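+	/*
+	 * PERF_EF_UPDATE asks us to fold the final hardware count into
+	 * event->count before the event is descheduled; PERF_HES_UPTODATE
+	 * records that this has been done.
+	 */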
+ if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + csky_perf_event_update(event, &event->hw); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void csky_pmu_del(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + csky_pmu_stop(event, PERF_EF_UPDATE); + + hw_events->events[hwc->idx] = NULL; + + perf_event_update_userpage(event); +} + +/* allocate hardware counter and optionally start counting */ +static int csky_pmu_add(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + hw_events->events[hwc->idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + csky_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev) +{ + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events); + struct pt_regs *regs; + int idx; + + /* + * Did an overflow occur? + */ + if (!cprcr(HPOFSR)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + csky_pmu_disable(&csky_pmu.pmu); + + for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!(cprcr(HPOFSR) & BIT(idx))) + continue; + + hwc = &event->hw; + csky_perf_event_update(event, &event->hw); + perf_sample_data_init(&data, 0, hwc->last_period); + csky_pmu_event_set_period(event); + + if (perf_event_overflow(event, &data, regs)) + csky_pmu_stop_event(event); + } + + csky_pmu_enable(&csky_pmu.pmu); + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. 
+	 */
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static int csky_pmu_request_irq(irq_handler_t handler)
+{
+	int err, irqs;
+	struct platform_device *pmu_device = csky_pmu.plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	csky_pmu_irq = platform_get_irq(pmu_device, 0);
+	if (csky_pmu_irq < 0)
+		return -ENODEV;
+	err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu",
+				 this_cpu_ptr(csky_pmu.hw_events));
+	if (err) {
+		pr_err("unable to request IRQ%d for CSKY PMU counters\n",
+		       csky_pmu_irq);
+		return err;
+	}
+
+	return 0;
+}
+
+static void csky_pmu_free_irq(void)
+{
+	int irq;
+	struct platform_device *pmu_device = csky_pmu.plat_device;
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0)
+		free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events));
+}
+
+int init_hw_perf_events(void)
+{
+	csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
+					      GFP_KERNEL);
+	if (!csky_pmu.hw_events) {
+		pr_info("failed to allocate per-cpu PMU data.\n");
+		return -ENOMEM;
+	}
+
+	csky_pmu.pmu = (struct pmu) {
+		.pmu_enable = csky_pmu_enable,
+		.pmu_disable = csky_pmu_disable,
+		.event_init = csky_pmu_event_init,
+		.add = csky_pmu_add,
+		.del = csky_pmu_del,
+		.start = csky_pmu_start,
+		.stop = csky_pmu_stop,
+		.read = csky_pmu_read,
+	};
+
+	memset((void *)hw_raw_read_mapping, 0,
+	       sizeof(hw_raw_read_mapping));
+
+	hw_raw_read_mapping[0x1] = csky_pmu_read_cc;
+	hw_raw_read_mapping[0x2] = csky_pmu_read_ic;
+	hw_raw_read_mapping[0x3] = csky_pmu_read_icac;
+	hw_raw_read_mapping[0x4] = csky_pmu_read_icmc;
+	hw_raw_read_mapping[0x5] = csky_pmu_read_dcac;
+	hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc;
+	hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac;
+	hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc;
+	hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc;
+	hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc;
+	hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc;
+	hw_raw_read_mapping[0xd] = csky_pmu_read_softc;
+	hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc;
+	hw_raw_read_mapping[0xf] = csky_pmu_read_cbic;
+	hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc;
+	hw_raw_read_mapping[0x11] = csky_pmu_read_ibic;
+	hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc;
+	hw_raw_read_mapping[0x13] = csky_pmu_read_sic;
+	hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac;
+	hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc;
+	hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac;
+	hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc;
+	hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac;
+	hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc;
+	hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac;
+	hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc;
+
+	memset((void *)hw_raw_write_mapping, 0,
+	       sizeof(hw_raw_write_mapping));
+
+	hw_raw_write_mapping[0x1] = csky_pmu_write_cc;
+	hw_raw_write_mapping[0x2] = csky_pmu_write_ic;
+	hw_raw_write_mapping[0x3] = csky_pmu_write_icac;
+	hw_raw_write_mapping[0x4] = csky_pmu_write_icmc;
+	hw_raw_write_mapping[0x5] = csky_pmu_write_dcac;
+	hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc;
+	hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac;
+	hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc;
+	hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc;
+	hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc;
+	hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc;
+	hw_raw_write_mapping[0xd] = csky_pmu_write_softc;
+	hw_raw_write_mapping[0xe] =
csky_pmu_write_cbmc; + hw_raw_write_mapping[0xf] = csky_pmu_write_cbic; + hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc; + hw_raw_write_mapping[0x11] = csky_pmu_write_ibic; + hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc; + hw_raw_write_mapping[0x13] = csky_pmu_write_sic; + hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac; + hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc; + hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac; + hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc; + hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac; + hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc; + hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac; + hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc; + + return 0; +} + +static int csky_pmu_starting_cpu(unsigned int cpu) +{ + enable_percpu_irq(csky_pmu_irq, 0); + return 0; +} + +static int csky_pmu_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(csky_pmu_irq); + return 0; +} + +int csky_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table) +{ + struct device_node *node = pdev->dev.of_node; + int ret; + + ret = init_hw_perf_events(); + if (ret) { + pr_notice("[perf] failed to probe PMU!\n"); + return ret; + } + + if (of_property_read_u32(node, "count-width", + &csky_pmu.count_width)) { + csky_pmu.count_width = DEFAULT_COUNT_WIDTH; + } + csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1; + + csky_pmu.plat_device = pdev; + + /* Ensure the PMU has sane values out of reset. */ + on_each_cpu(csky_pmu_reset, &csky_pmu, 1); + + ret = csky_pmu_request_irq(csky_pmu_handle_irq); + if (ret) { + csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + pr_notice("[perf] PMU request irq fail!\n"); + } + + ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE", + csky_pmu_starting_cpu, + csky_pmu_dying_cpu); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + return ret; + } + + ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + } + + return ret; +} + +static const struct of_device_id csky_pmu_of_device_ids[] = { + {.compatible = "csky,csky-pmu"}, + {}, +}; + +static int csky_pmu_dev_probe(struct platform_device *pdev) +{ + return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids); +} + +static struct platform_driver csky_pmu_driver = { + .driver = { + .name = "csky-pmu", + .of_match_table = csky_pmu_of_device_ids, + }, + .probe = csky_pmu_dev_probe, +}; + +static int __init csky_pmu_probe(void) +{ + int ret; + + ret = platform_driver_register(&csky_pmu_driver); + if (ret) + pr_notice("[perf] PMU initialization failed\n"); + else + pr_notice("[perf] PMU initialization done\n"); + + return ret; +} + +device_initcall(csky_pmu_probe); diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c new file mode 100644 index 0000000000..09b7f88a2d --- /dev/null +++ b/arch/csky/kernel/perf_regs.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
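+//
+// perf_reg_value() below treats struct pt_regs as a flat array of u32
+// slots, so the PERF_REG_CSKY_* indices from the uapi header must match
+// the field order of struct pt_regs exactly.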
+ +#include +#include +#include +#include +#include +#include + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX)) + return 0; + + return (u64)*((u32 *)regs + idx); +} + +#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_32; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c new file mode 100644 index 0000000000..86ee202906 --- /dev/null +++ b/arch/csky/kernel/power.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_power_off(void) +{ + local_irq_disable(); + do_kernel_power_off(); + asm volatile ("bkpt"); +} + +void machine_halt(void) +{ + local_irq_disable(); + do_kernel_power_off(); + asm volatile ("bkpt"); +} + +void machine_restart(char *cmd) +{ + local_irq_disable(); + do_kernel_restart(cmd); + asm volatile ("bkpt"); +} diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile new file mode 100644 index 0000000000..1c7c6e6cb2 --- /dev/null +++ b/arch/csky/kernel/probes/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o +obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o + +CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c new file mode 100644 index 0000000000..bbc4edc25d --- /dev/null +++ b/arch/csky/kernel/probes/decode-insn.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 
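+ * INSN_GOOD          If instruction is supported and uses its slot.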
+ */
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+	probe_opcode_t insn = le32_to_cpu(*addr);
+
+	CSKY_INSN_SET_SIMULATE(br16, insn);
+	CSKY_INSN_SET_SIMULATE(bt16, insn);
+	CSKY_INSN_SET_SIMULATE(bf16, insn);
+	CSKY_INSN_SET_SIMULATE(jmp16, insn);
+	CSKY_INSN_SET_SIMULATE(jsr16, insn);
+	CSKY_INSN_SET_SIMULATE(lrw16, insn);
+	CSKY_INSN_SET_SIMULATE(pop16, insn);
+
+	CSKY_INSN_SET_SIMULATE(br32, insn);
+	CSKY_INSN_SET_SIMULATE(bt32, insn);
+	CSKY_INSN_SET_SIMULATE(bf32, insn);
+	CSKY_INSN_SET_SIMULATE(jmp32, insn);
+	CSKY_INSN_SET_SIMULATE(jsr32, insn);
+	CSKY_INSN_SET_SIMULATE(lrw32, insn);
+	CSKY_INSN_SET_SIMULATE(pop32, insn);
+
+	CSKY_INSN_SET_SIMULATE(bez32, insn);
+	CSKY_INSN_SET_SIMULATE(bnez32, insn);
+	CSKY_INSN_SET_SIMULATE(bnezad32, insn);
+	CSKY_INSN_SET_SIMULATE(bhsz32, insn);
+	CSKY_INSN_SET_SIMULATE(bhz32, insn);
+	CSKY_INSN_SET_SIMULATE(blsz32, insn);
+	CSKY_INSN_SET_SIMULATE(blz32, insn);
+	CSKY_INSN_SET_SIMULATE(bsr32, insn);
+	CSKY_INSN_SET_SIMULATE(jmpi32, insn);
+	CSKY_INSN_SET_SIMULATE(jsri32, insn);
+
+	return INSN_GOOD;
+}
diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h
new file mode 100644
index 0000000000..9c4ad48fee
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+
+#include
+#include
+
+enum probe_insn {
+	INSN_REJECTED,
+	INSN_GOOD_NO_SLOT,
+	INSN_GOOD,
+};
+
+#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
+
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
new file mode 100644
index 0000000000..834cffcfbc
--- /dev/null
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+	int bit;
+	bool lr_saver = false;
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	struct pt_regs *regs;
+
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
+	regs = ftrace_get_regs(fregs);
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (!p) {
+		p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+		if (unlikely(!p) || kprobe_disabled(p))
+			goto out;
+		lr_saver = true;
+	}
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+
+		if (lr_saver)
+			ip -= MCOUNT_INSN_SIZE;
+		instruction_pointer_set(regs, ip);
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate singlestep (and also recover regs->pc)
+			 * as if there is a nop
+			 */
+			instruction_pointer_set(regs,
+				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+		/*
+		 * If pre_handler returns !0, it changes regs->pc. We have to
+		 * skip emulating post_handler.
+ */ + __this_cpu_write(current_kprobe, NULL); + } +out: + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.api.insn = NULL; + return 0; +} diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c new file mode 100644 index 0000000000..3c6e5c725d --- /dev/null +++ b/arch/csky/kernel/probes/kprobes.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#define pr_fmt(fmt) "kprobes: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); + +struct csky_insn_patch { + kprobe_opcode_t *addr; + u32 opcode; + atomic_t cpu_count; +}; + +static int __kprobes patch_text_cb(void *priv) +{ + struct csky_insn_patch *param = priv; + unsigned int addr = (unsigned int)param->addr; + + if (atomic_inc_return(¶m->cpu_count) == num_online_cpus()) { + *(u16 *) addr = cpu_to_le16(param->opcode); + dcache_wb_range(addr, addr + 2); + atomic_inc(¶m->cpu_count); + } else { + while (atomic_read(¶m->cpu_count) <= num_online_cpus()) + cpu_relax(); + } + + icache_inv_range(addr, addr + 2); + + return 0; +} + +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) }; + + return stop_machine_cpuslocked(patch_text_cb, ¶m, cpu_online_mask); +} + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + unsigned long offset = is_insn32(p->opcode) ? 4 : 2; + + p->ainsn.api.restore = (unsigned long)p->addr + offset; + + patch_text(p->ainsn.api.insn, p->opcode); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + p->ainsn.api.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.api.handler) + p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); + + post_kprobe_handler(kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x1) + return -EILSEQ; + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + /* decode instruction */ + switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.api.insn = NULL; + break; + + case INSN_GOOD: /* instruction uses slot */ + p->ainsn.api.insn = get_insn_slot(); + if (!p->ainsn.api.insn) + return -ENOMEM; + break; + } + + /* prepare the instruction */ + if (p->ainsn.api.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +/* install breakpoint in text */ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, USR_BKPT); +} + +/* remove breakpoint from text */ +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.api.insn) { + free_insn_slot(p->ainsn.api.insn, 0); + p->ainsn.api.insn = NULL; + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes 
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+	__this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without interrupts disabled on the local CPU, an interrupt could arrive
+ * between the exception return and the start of the out-of-line single
+ * step, which would result in wrongly single-stepping into the interrupt
+ * handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+						struct pt_regs *regs)
+{
+	kcb->saved_sr = regs->sr;
+	regs->sr &= ~BIT(6);
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+						struct pt_regs *regs)
+{
+	regs->sr = kcb->saved_sr;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+	kcb->ss_ctx.ss_pending = true;
+	kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+	kcb->ss_ctx.ss_pending = false;
+	kcb->ss_ctx.match_addr = 0;
+}
+
+#define TRACE_MODE_SI BIT(14)
+#define TRACE_MODE_MASK ~(0x3 << 14)
+#define TRACE_MODE_RUN 0
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+				       struct pt_regs *regs,
+				       struct kprobe_ctlblk *kcb, int reenter)
+{
+	unsigned long slot;
+
+	if (reenter) {
+		save_previous_kprobe(kcb);
+		set_current_kprobe(p);
+		kcb->kprobe_status = KPROBE_REENTER;
+	} else {
+		kcb->kprobe_status = KPROBE_HIT_SS;
+	}
+
+	if (p->ainsn.api.insn) {
+		/* prepare for single stepping */
+		slot = (unsigned long)p->ainsn.api.insn;
+
+		set_ss_context(kcb, slot, p);	/* mark pending ss */
+
+		/* IRQs and single stepping do not mix well. */
+		kprobes_save_local_irqflag(kcb, regs);
+		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+		instruction_pointer_set(regs, slot);
+	} else {
+		/* insn simulation */
+		arch_simulate_insn(p, regs);
+	}
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+				    struct pt_regs *regs,
+				    struct kprobe_ctlblk *kcb)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+	case KPROBE_HIT_ACTIVE:
+		kprobes_inc_nmissed_count(p);
+		setup_singlestep(p, regs, kcb, 1);
+		break;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		pr_warn("Failed to recover from reentered kprobes.\n");
+		dump_kprobe(p);
+		BUG();
+		break;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+
+	return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+
+	if (!cur)
+		return;
+
+	/* restore pc if the insn was stepped in the slot (non-branching) */
+	if (cur->ainsn.api.restore != 0)
+		regs->pc = cur->ainsn.api.restore;
+
+	/* restore the previously saved kprobe state and continue */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		return;
+	}
+
+	/* call post handler */
+	kcb->kprobe_status = KPROBE_HIT_SSDONE;
+	if (cur->post_handler) {
+		/* post_handler can hit breakpoint and single step
+		 * again, so we enable D-flag for recursive exception.
+		 */
+		cur->post_handler(cur, regs, 0);
+	}
+
+	reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe and the ip points back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->pc = (unsigned long) cur->addr;
+		BUG_ON(!instruction_pointer(regs));
+
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
+
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		if (fixup_exception(regs))
+			return 1;
+	}
+	return 0;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+	struct kprobe *p, *cur_kprobe;
+	struct kprobe_ctlblk *kcb;
+	unsigned long addr = instruction_pointer(regs);
+
+	kcb = get_kprobe_ctlblk();
+	cur_kprobe = kprobe_running();
+
+	p = get_kprobe((kprobe_opcode_t *) addr);
+
+	if (p) {
+		if (cur_kprobe) {
+			if (reenter_kprobe(p, regs, kcb))
+				return 1;
+		} else {
+			/* Probe hit */
+			set_current_kprobe(p);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it has
+			 * modified the execution path and there is no need
+			 * to single-step; just reset the current kprobe
+			 * and exit.
+			 *
+			 * The pre_handler can itself hit a breakpoint and
+			 * single-step before returning.
+			 */
+			if (!p->pre_handler || !p->pre_handler(p, regs))
+				setup_singlestep(p, regs, kcb, 0);
+			else
+				reset_current_kprobe();
+		}
+		return 1;
+	}
+
+	/*
+	 * The breakpoint instruction was removed right
+	 * after we hit it. Another cpu has removed
+	 * either a probepoint or a debugger breakpoint
+	 * at this address. In either case, no further
+	 * handling of this interrupt is appropriate.
+	 * Return to the original instruction and continue.
+	 */
+	return 0;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if ((kcb->ss_ctx.ss_pending)
+	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+		clear_ss_context(kcb);	/* clear pending ss */
+
+		kprobes_restore_local_irqflag(kcb, regs);
+		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+		post_kprobe_handler(kcb, regs);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */ +int __init arch_populate_kprobe_blacklist(void) +{ + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + return ret; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + return (void *)kretprobe_trampoline_handler(regs, NULL); +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->lr; + ri->fp = NULL; + regs->lr = (unsigned long) &__kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S new file mode 100644 index 0000000000..ba48ad04a8 --- /dev/null +++ b/arch/csky/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#include + +#include + +ENTRY(__kretprobe_trampoline) + SAVE_REGS_FTRACE + + mov a0, sp /* pt_regs */ + + jbsr trampoline_probe_handler + + /* use the result as the return-address */ + mov lr, a0 + + RESTORE_REGS_FTRACE + rts +ENDPROC(__kretprobe_trampoline) diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c new file mode 100644 index 0000000000..d6e8d092c9 --- /dev/null +++ b/arch/csky/kernel/probes/simulate-insn.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +static inline bool csky_insn_reg_get_val(struct pt_regs *regs, + unsigned long index, + unsigned long *ptr) +{ + if (index < 14) + *ptr = *(®s->a0 + index); + + if (index > 15 && index < 31) + *ptr = *(®s->exregs[0] + index - 16); + + switch (index) { + case 14: + *ptr = regs->usp; + break; + case 15: + *ptr = regs->lr; + break; + case 31: + *ptr = regs->tls; + break; + default: + goto fail; + } + + return true; +fail: + return false; +} + +static inline bool csky_insn_reg_set_val(struct pt_regs *regs, + unsigned long index, + unsigned long val) +{ + if (index < 14) + *(®s->a0 + index) = val; + + if (index > 15 && index < 31) + *(®s->exregs[0] + index - 16) = val; + + switch (index) { + case 14: + regs->usp = val; + break; + case 15: + regs->lr = val; + break; + case 31: + regs->tls = val; + break; + default: + goto fail; + } + + return true; +fail: + return false; +} + +void __kprobes +simulate_br16(u32 opcode, long addr, struct pt_regs *regs) +{ + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); +} + +void __kprobes +simulate_br32(u32 opcode, long addr, struct pt_regs *regs) +{ + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); +} + +void __kprobes +simulate_bt16(u32 opcode, long addr, struct pt_regs *regs) +{ + if (regs->sr & 1) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); + else + instruction_pointer_set(regs, addr + 2); +} + +void __kprobes +simulate_bt32(u32 opcode, long addr, struct pt_regs *regs) +{ + if (regs->sr & 1) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bf16(u32 opcode, long addr, struct pt_regs *regs) +{ + if (!(regs->sr & 1)) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); + else + instruction_pointer_set(regs, addr + 2); +} + +void __kprobes 
+simulate_bf32(u32 opcode, long addr, struct pt_regs *regs) +{ + if (!(regs->sr & 1)) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 2; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 4; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long tmp = (opcode & 0x300) >> 3; + unsigned long offset = ((opcode & 0x1f) | tmp) << 2; + + tmp = (opcode & 0xe0) >> 5; + + val = *(unsigned int *)(instruction_pointer(regs) + offset); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = (opcode & 0xffff0000) >> 14; + unsigned long tmp = opcode & 0x0000001f; + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_pop16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < (opcode & 0xf); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x10) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_pop32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x100000) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) { + csky_insn_reg_set_val(regs, i + 16, *tmp); + tmp += 1; + } + + if (opcode & 0x1000000) { + csky_insn_reg_set_val(regs, 29, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_bez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp == 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp != 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void 
__kprobes +simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + long val; + + csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val); + + val -= 1; + + if (val > 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, (unsigned long)val); +} + +void __kprobes +simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if ((long) val >= 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if ((long) val > 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if ((long) val <= 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_blz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if ((long) val < 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp; + + tmp = (opcode & 0xffff) << 16; + tmp |= (opcode & 0xffff0000) >> 16; + + instruction_pointer_set(regs, + addr + sign_extend32((tmp & 0x3ffffff) << 1, 15)); + + regs->lr = addr + 4; +} + +void __kprobes +simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = ((opcode & 0xffff0000) >> 14); + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + instruction_pointer_set(regs, val); +} + +void __kprobes +simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = ((opcode & 0xffff0000) >> 14); + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + regs->lr = addr + 4; + + instruction_pointer_set(regs, val); +} diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h new file mode 100644 index 0000000000..ba4cb7ef06 --- /dev/null +++ b/arch/csky/kernel/probes/simulate-insn.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H +#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H + +#define __CSKY_INSN_FUNCS(name, mask, val) \ +static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ +void simulate_##name(u32 opcode, long addr, struct pt_regs *regs); + +#define CSKY_INSN_SET_SIMULATE(name, code) \ + do { \ + if (csky_insn_is_##name(code)) { \ + api->handler = simulate_##name; \ + return INSN_GOOD_NO_SLOT; \ + } \ + } while (0) + 
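+/*
+ * Each line below defines a matcher csky_insn_is_<name>() that tests
+ * (code & mask) == val.  For example, csky_insn_is_br32() checks
+ * (insn & 0x0000ffff) == 0x0000e800: the low halfword carries the
+ * 32-bit opcode and the high halfword carries the branch displacement
+ * (consumed by simulate_br32() as (opcode & 0xffff0000) >> 15).
+ */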
+__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400) +__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800) +__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00) +__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800) +__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801) +__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000) +__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480) + +__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800) +__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860) +__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840) +__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0) +__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0) +__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80) +__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0) + +__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900) +__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920) +__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820) +__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0) +__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940) +__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960) +__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980) +__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) +__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0) +__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0) + +#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */ diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c new file mode 100644 index 0000000000..936bea6fd3 --- /dev/null +++ b/arch/csky/kernel/probes/uprobes.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014-2016 Pratyush Anand + */ +#include +#include +#include +#include + +#include "decode-insn.h" + +#define UPROBE_TRAP_NR UINT_MAX + +bool is_swbp_insn(uprobe_opcode_t *insn) +{ + return (*insn & 0xffff) == UPROBE_SWBP_INSN; +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t insn; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + + auprobe->insn_size = is_insn32(insn) ? 
4 : 2; + + switch (csky_probe_decode_insn(&insn, &auprobe->api)) { + case INSN_REJECTED: + return -EINVAL; + + case INSN_GOOD_NO_SLOT: + auprobe->simulate = true; + break; + + default: + break; + } + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + utask->autask.saved_trap_no = current->thread.trap_no; + current->thread.trap_no = UPROBE_TRAP_NR; + + instruction_pointer_set(regs, utask->xol_vaddr); + + user_enable_single_step(current); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); + current->thread.trap_no = utask->autask.saved_trap_no; + + instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); + + user_disable_single_step(current); + + return 0; +} + +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + if (t->thread.trap_no != UPROBE_TRAP_NR) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + probe_opcode_t insn; + unsigned long addr; + + if (!auprobe->simulate) + return false; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + addr = instruction_pointer(regs); + + if (auprobe->api.handler) + auprobe->api.handler(insn, addr, regs); + + return true; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + current->thread.trap_no = utask->autask.saved_trap_no; + + /* + * Task has received a fatal signal, so reset back to probed + * address. + */ + instruction_pointer_set(regs, utask->vaddr); + + user_disable_single_step(current); +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->usp <= ret->stack; + else + return regs->usp < ret->stack; +} + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->lr; + + regs->lr = trampoline_vaddr; + + return ra; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +int uprobe_breakpoint_handler(struct pt_regs *regs) +{ + if (uprobe_pre_sstep_notifier(regs)) + return 1; + + return 0; +} + +int uprobe_single_step_handler(struct pt_regs *regs) +{ + if (uprobe_post_sstep_notifier(regs)) + return 1; + + return 0; +} diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c new file mode 100644 index 0000000000..0c6e4b17fe --- /dev/null +++ b/arch/csky/kernel/process.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
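+//
+// copy_thread() below lays out the top of the child's kernel stack as
+// [ ... | struct switch_stack | struct pt_regs ], with thread.sp
+// pointing at the switch_stack so switch_to() can restore the child's
+// callee-saved registers before returning through ret_from_fork or
+// ret_from_kernel_thread.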
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct cpuinfo_csky cpu_data[NR_CPUS]; + +#ifdef CONFIG_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + +asmlinkage void ret_from_fork(void); +asmlinkage void ret_from_kernel_thread(void); + +/* + * Some archs flush debug and FPU info here + */ +void flush_thread(void){} + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; + struct switch_stack *childstack; + struct pt_regs *childregs = task_pt_regs(p); + +#ifdef CONFIG_CPU_HAS_FPU + save_to_user_fp(&p->thread.user_fp); +#endif + + childstack = ((struct switch_stack *) childregs) - 1; + memset(childstack, 0, sizeof(struct switch_stack)); + + /* setup thread.sp for switch_to !!! */ + p->thread.sp = (unsigned long)childstack; + + if (unlikely(args->fn)) { + memset(childregs, 0, sizeof(struct pt_regs)); + childstack->r15 = (unsigned long) ret_from_kernel_thread; + childstack->r10 = (unsigned long) args->fn_arg; + childstack->r9 = (unsigned long) args->fn; + childregs->sr = mfcr("psr"); + } else { + *childregs = *(current_pt_regs()); + if (usp) + childregs->usp = usp; + if (clone_flags & CLONE_SETTLS) + task_thread_info(p)->tp_value = childregs->tls + = tls; + + childregs->a0 = 0; + childstack->r15 = (unsigned long) ret_from_fork; + } + + return 0; +} + +/* Fill in the fpu structure for a core dump. */ +int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) +{ + memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu)); + return 1; +} + +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) +{ + struct pt_regs *regs = task_pt_regs(tsk); + + /* NOTE: usp is error value. */ + ELF_CORE_COPY_REGS((*pr_regs), regs) + + return 1; +} + +#ifndef CONFIG_CPU_PM_NONE +void arch_cpu_idle(void) +{ +#ifdef CONFIG_CPU_PM_WAIT + asm volatile("wait\n"); +#endif + +#ifdef CONFIG_CPU_PM_DOZE + asm volatile("doze\n"); +#endif + +#ifdef CONFIG_CPU_PM_STOP + asm volatile("stop\n"); +#endif +} +#endif diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c new file mode 100644 index 0000000000..0f7e7b653c --- /dev/null +++ b/arch/csky/kernel/ptrace.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/* sets the trace bits. */ +#define TRACE_MODE_SI (1 << 14) +#define TRACE_MODE_RUN 0 +#define TRACE_MODE_MASK ~(0x3 << 14) + +/* + * Make sure the single step bit is not set. + */ +static void singlestep_disable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; + + /* Enable irq */ + regs->sr |= BIT(6); +} + +static void singlestep_enable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; + + /* Disable irq */ + regs->sr &= ~BIT(6); +} + +/* + * Make sure the single step bit is set. 
+ */
+void user_enable_single_step(struct task_struct *child)
+{
+	singlestep_enable(child);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	singlestep_disable(child);
+}
+
+enum csky_regset {
+	REGSET_GPR,
+	REGSET_FPR,
+};
+
+static int gpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   struct membuf to)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	/* On ABIv1 regs->tls is a placeholder, so sync it from thread_info here. */
+	regs->tls = task_thread_info(target)->tp_value;
+
+	return membuf_write(&to, regs, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct pt_regs regs;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+	if (ret)
+		return ret;
+
+	/* BIT(0) of regs.sr is Condition Code/Carry bit */
+	regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
+#ifdef CONFIG_CPU_HAS_HILO
+	regs.dcsr = task_pt_regs(target)->dcsr;
+#endif
+	task_thread_info(target)->tp_value = regs.tls;
+
+	*task_pt_regs(target) = regs;
+
+	return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   struct membuf to)
+{
+	struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+	int i;
+	struct user_fp tmp = *regs;
+
+	for (i = 0; i < 16; i++) {
+		tmp.vr[i*4] = regs->vr[i*2];
+		tmp.vr[i*4 + 1] = regs->vr[i*2 + 1];
+	}
+
+	for (i = 0; i < 32; i++)
+		tmp.vr[64 + i] = regs->vr[32 + i];
+
+	return membuf_write(&to, &tmp, sizeof(tmp));
+#else
+	return membuf_write(&to, regs, sizeof(*regs));
+#endif
+}
+
+static int fpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+	int i;
+	struct user_fp tmp;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1);
+	if (ret)
+		return ret;
+
+	*regs = tmp;
+
+	for (i = 0; i < 16; i++) {
+		regs->vr[i*2] = tmp.vr[i*4];
+		regs->vr[i*2 + 1] = tmp.vr[i*4 + 1];
+	}
+
+	for (i = 0; i < 32; i++)
+		regs->vr[32 + i] = tmp.vr[64 + i];
+#else
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+#endif
+
+	return ret;
+}
+
+static const struct user_regset csky_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = sizeof(struct pt_regs) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.regset_get = gpr_get,
+		.set = gpr_set,
+	},
+	[REGSET_FPR] = {
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct user_fp) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.regset_get = fpr_get,
+		.set = fpr_set,
+	},
+};
+
+static const struct user_regset_view user_csky_view = {
+	.name = "csky",
+	.e_machine = ELF_ARCH,
+	.regsets = csky_regsets,
+	.n = ARRAY_SIZE(csky_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &user_csky_view;
+}
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME(tls),
+	REG_OFFSET_NAME(lr),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(sr),
+	REG_OFFSET_NAME(usp),
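+	/*
+	 * The regs[0..9] entries below are r4-r13 on ABIv2, and r6-r14
+	 * plus r1 on ABIv1; see the register dump in show_regs() below.
+	 */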
+ REG_OFFSET_NAME(orig_a0), + REG_OFFSET_NAME(a0), + REG_OFFSET_NAME(a1), + REG_OFFSET_NAME(a2), + REG_OFFSET_NAME(a3), + REG_OFFSET_NAME(regs[0]), + REG_OFFSET_NAME(regs[1]), + REG_OFFSET_NAME(regs[2]), + REG_OFFSET_NAME(regs[3]), + REG_OFFSET_NAME(regs[4]), + REG_OFFSET_NAME(regs[5]), + REG_OFFSET_NAME(regs[6]), + REG_OFFSET_NAME(regs[7]), + REG_OFFSET_NAME(regs[8]), + REG_OFFSET_NAME(regs[9]), +#if defined(__CSKYABIV2__) + REG_OFFSET_NAME(exregs[0]), + REG_OFFSET_NAME(exregs[1]), + REG_OFFSET_NAME(exregs[2]), + REG_OFFSET_NAME(exregs[3]), + REG_OFFSET_NAME(exregs[4]), + REG_OFFSET_NAME(exregs[5]), + REG_OFFSET_NAME(exregs[6]), + REG_OFFSET_NAME(exregs[7]), + REG_OFFSET_NAME(exregs[8]), + REG_OFFSET_NAME(exregs[9]), + REG_OFFSET_NAME(exregs[10]), + REG_OFFSET_NAME(exregs[11]), + REG_OFFSET_NAME(exregs[12]), + REG_OFFSET_NAME(exregs[13]), + REG_OFFSET_NAME(exregs[14]), + REG_OFFSET_NAME(rhi), + REG_OFFSET_NAME(rlo), + REG_OFFSET_NAME(dcsr), +#endif + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +void ptrace_disable(struct task_struct *child) +{ + singlestep_disable(child); +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + long ret = -EIO; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (ptrace_report_syscall_entry(regs)) + return -1; + + if (secure_computing() == -1) + return -1; + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, syscall_get_nr(current, regs)); + + audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3); + return 0; +} + +asmlinkage void syscall_trace_exit(struct pt_regs *regs) +{ + audit_syscall_exit(regs); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_exit(regs, syscall_get_return_value(current, regs)); +} + +#ifdef CONFIG_CPU_CK860 +static void show_iutlb(void) +{ + int entry, i; + unsigned long flags; + unsigned long oldpid; + unsigned long entryhi[16], entrylo0[16], entrylo1[16]; + + oldpid = read_mmu_entryhi(); + + entry = 0x8000; + + local_irq_save(flags); + + for (i = 0; i < 16; i++) { + write_mmu_index(entry); + tlb_read(); + entryhi[i] = read_mmu_entryhi(); + entrylo0[i] = read_mmu_entrylo0(); + entrylo1[i] = read_mmu_entrylo1(); + + entry++; + } + + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + for (i = 0; i < 16; i++) + printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + i, entryhi[i], entrylo0[i], entrylo1[i]); + printk("\n\n\n"); +} + +static void show_dutlb(void) +{ + int entry, i; + unsigned long flags; + unsigned long oldpid; + unsigned long entryhi[16], entrylo0[16], entrylo1[16]; + + oldpid = read_mmu_entryhi(); + + entry = 0x4000; + + local_irq_save(flags); + + for (i = 0; i < 16; i++) { + write_mmu_index(entry); + tlb_read(); + entryhi[i] = read_mmu_entryhi(); + entrylo0[i] = read_mmu_entrylo0(); + entrylo1[i] = read_mmu_entrylo1(); + + entry++; + } + + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + for (i = 0; i < 16; i++) + printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + i, entryhi[i], entrylo0[i], entrylo1[i]); + printk("\n\n\n"); +} + +static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024]; +static void show_jtlb(void) +{ + int entry; + unsigned long flags; + unsigned long oldpid; + + oldpid = read_mmu_entryhi(); + + entry = 0; + + local_irq_save(flags); + while (entry < 1024) { + write_mmu_index(entry); + tlb_read(); + entryhi[entry] = read_mmu_entryhi(); + entrylo0[entry] = read_mmu_entrylo0(); + entrylo1[entry] = read_mmu_entrylo1(); + + entry++; + } + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + + for (entry = 0; entry < 1024; entry++) + printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + entry, entryhi[entry], entrylo0[entry], entrylo1[entry]); + printk("\n\n\n"); +} + +static void show_tlb(void) +{ + show_iutlb(); + show_dutlb(); + show_jtlb(); +} +#else +static void 
show_tlb(void) +{ + return; +} +#endif + +void show_regs(struct pt_regs *fp) +{ + pr_info("\nCURRENT PROCESS:\n\n"); + pr_info("COMM=%s PID=%d\n", current->comm, current->pid); + + if (current->mm) { + pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", + (int) current->mm->start_code, + (int) current->mm->end_code, + (int) current->mm->start_data, + (int) current->mm->end_data, + (int) current->mm->end_data, + (int) current->mm->brk); + pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n", + (int) current->mm->start_stack, + (int) (((unsigned long) current) + 2 * PAGE_SIZE)); + } + + pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); + pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); + pr_info("SP: 0x%08lx\n", (long)fp->usp); + pr_info("PSR: 0x%08lx\n", (long)fp->sr); + pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("PT_REGS: 0x%08lx\n", (long)fp); + + pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", + fp->a0, fp->a1, fp->a2, fp->a3); +#if defined(__CSKYABIV2__) + pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", + fp->regs[8], fp->regs[9], fp->lr); + pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", + fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); + pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", + fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); + pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", + fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]); + pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", + fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); + pr_info(" hi: 0x%08lx lo: 0x%08lx\n", + fp->rhi, fp->rlo); +#else + pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r14: 0x%08lx r1: 0x%08lx\n", + fp->regs[8], fp->regs[9]); +#endif + + show_tlb(); + + return; +} diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c new file mode 100644 index 0000000000..106fbf0b6f --- /dev/null +++ b/arch/csky/kernel/setup.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
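+//
+// csky_memblock_init() below caps lowmem at LOWMEM_LIMIT (or at one
+// SSEG on smaller configurations); pages above the cap only become
+// available through ZONE_HIGHMEM when CONFIG_HIGHMEM is enabled.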
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DUMMY_CONSOLE +struct screen_info screen_info = { + .orig_video_lines = 30, + .orig_video_cols = 80, + .orig_video_mode = 0, + .orig_video_ega_bx = 0, + .orig_video_isVGA = 1, + .orig_video_points = 8 +}; +#endif + +static void __init csky_memblock_init(void) +{ + unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; + signed long size; + + memblock_reserve(__pa(_start), _end - _start); + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + + memblock_dump_all(); + + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + size = max_pfn - min_low_pfn; + + if (size >= lowmem_size) { + max_low_pfn = min_low_pfn + lowmem_size; +#ifdef CONFIG_PAGE_OFFSET_80000000 + write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); +#endif + } else if (size > sseg_size) { + max_low_pfn = min_low_pfn + sseg_size; + } + + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; + + mmu_init(min_low_pfn, max_low_pfn); + +#ifdef CONFIG_HIGHMEM + max_zone_pfn[ZONE_HIGHMEM] = max_pfn; + + highstart_pfn = max_low_pfn; + highend_pfn = max_pfn; +#endif + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + dma_contiguous_reserve(0); + + free_area_init(max_zone_pfn); +} + +void __init setup_arch(char **cmdline_p) +{ + *cmdline_p = boot_command_line; + + console_verbose(); + + pr_info("Phys. mem: %ldMB\n", + (unsigned long) memblock_phys_mem_size()/1024/1024); + + setup_initial_init_mm(_start, _etext, _edata, _end); + + parse_early_param(); + + csky_memblock_init(); + + unflatten_and_copy_device_tree(); + +#ifdef CONFIG_SMP + setup_smp(); +#endif + + sparse_init(); + + fixaddr_init(); + +#ifdef CONFIG_HIGHMEM + kmap_init(); +#endif +} + +unsigned long va_pa_offset; +EXPORT_SYMBOL(va_pa_offset); + +static inline unsigned long read_mmu_msa(void) +{ +#ifdef CONFIG_PAGE_OFFSET_80000000 + return read_mmu_msa0(); +#endif + +#ifdef CONFIG_PAGE_OFFSET_A0000000 + return read_mmu_msa1(); +#endif +} + +asmlinkage __visible void __init csky_start(unsigned int unused, + void *dtb_start) +{ + /* Clean up bss section */ + memset(__bss_start, 0, __bss_stop - __bss_start); + + va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1); + + pre_trap_init(); + + if (dtb_start == NULL) + early_init_dt_scan(__dtb_start); + else + early_init_dt_scan(dtb_start); + + start_kernel(); + + asm volatile("br .\n"); +} diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c new file mode 100644 index 0000000000..10da0fefd4 --- /dev/null +++ b/arch/csky/kernel/signal.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#ifdef CONFIG_CPU_HAS_FPU +#include +static int restore_fpu_state(struct sigcontext __user *sc) +{ + int err = 0; + struct user_fp user_fp; + + err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); + + restore_from_user_fp(&user_fp); + + return err; +} + +static int save_fpu_state(struct sigcontext __user *sc) +{ + struct user_fp user_fp; + + save_to_user_fp(&user_fp); + + return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); +} +#else +#define restore_fpu_state(sigcontext) (0) +#define save_fpu_state(sigcontext) (0) +#endif + +struct rt_sigframe { + /* + * pad[3] is compatible with the same struct 
defined in + * gcc/libgcc/config/csky/linux-unwind.h + */ + int pad[3]; + struct siginfo info; + struct ucontext uc; +}; + +static long restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) +{ + int err = 0; + unsigned long sr = regs->sr; + + /* sc_pt_regs is structured the same as the start of pt_regs */ + err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs)); + + /* BIT(0) of regs->sr is Condition Code/Carry bit */ + regs->sr = (sr & ~1) | (regs->sr & 1); + + /* Restore the floating-point state. */ + err |= restore_fpu_state(sc); + + return err; +} + +SYSCALL_DEFINE0(rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame; + sigset_t set; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + frame = (struct rt_sigframe __user *)regs->usp; + + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->a0; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +static int setup_sigcontext(struct rt_sigframe __user *frame, + struct pt_regs *regs) +{ + struct sigcontext __user *sc = &frame->uc.uc_mcontext; + int err = 0; + + err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); + err |= save_fpu_state(sc); + + return err; +} + +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, size_t framesize) +{ + unsigned long sp; + /* Default to using normal stack */ + sp = regs->usp; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) + return (void __user __force *)(-1UL); + + /* This is the X/Open sanctioned signal stack switching. */ + sp = sigsp(sp, ksig) - framesize; + + /* Align the stack frame. */ + sp &= -8UL; + + return (void __user *)sp; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0; + + frame = get_sigframe(ksig, regs, sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->usp); + err |= setup_sigcontext(frame, regs); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* Set up to return from userspace. */ + regs->lr = (unsigned long)VDSO_SYMBOL( + current->mm->context.vdso, rt_sigreturn); + + /* + * Set up registers for signal handler. + * Registers that we don't modify keep the value they had from + * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). 
+ */ + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + regs->usp = (unsigned long)frame; + regs->a0 = ksig->sig; /* a0: signal number */ + regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */ + regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */ + + return 0; +} + +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + /* Are we from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* If so, check system call restarting.. */ + switch (regs->a0) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->a0 = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { + regs->a0 = -EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* Set up the stack frame */ + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +static void do_signal(struct pt_regs *regs) +{ + struct ksignal ksig; + + if (get_signal(&ksig)) { + /* Actually deliver the signal */ + handle_signal(&ksig, regs); + return; + } + + /* Did we come from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* Restart the system call - no handlers present */ + switch (regs->a0) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + case -ERESTART_RESTARTBLOCK: + regs->a0 = regs->orig_a0; + regs_syscallid(regs) = __NR_restart_syscall; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* + * If there is no signal to deliver, we just put the saved + * sigmask back. 
+ */ + restore_saved_sigmask(); +} + +/* + * notification of userspace execution resumption + * - triggered by the _TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, + unsigned long thread_info_flags) +{ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + /* Handle pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_info_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); +} diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c new file mode 100644 index 0000000000..8e42352cbf --- /dev/null +++ b/arch/csky/kernel/smp.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_CPU_HAS_FPU +#include +#endif + +enum ipi_message_type { + IPI_EMPTY, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_IRQ_WORK, + IPI_MAX +}; + +struct ipi_data_struct { + unsigned long bits ____cacheline_aligned; + unsigned long stats[IPI_MAX] ____cacheline_aligned; +}; +static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data); + +static irqreturn_t handle_ipi(int irq, void *dev) +{ + unsigned long *stats = this_cpu_ptr(&ipi_data)->stats; + + while (true) { + unsigned long ops; + + ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); + if (ops == 0) + return IRQ_HANDLED; + + if (ops & (1 << IPI_RESCHEDULE)) { + stats[IPI_RESCHEDULE]++; + scheduler_ipi(); + } + + if (ops & (1 << IPI_CALL_FUNC)) { + stats[IPI_CALL_FUNC]++; + generic_smp_call_function_interrupt(); + } + + if (ops & (1 << IPI_IRQ_WORK)) { + stats[IPI_IRQ_WORK]++; + irq_work_run(); + } + + BUG_ON((ops >> IPI_MAX) != 0); + } + + return IRQ_HANDLED; +} + +static void (*send_arch_ipi)(const struct cpumask *mask); + +static int ipi_irq; +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq) +{ + if (send_arch_ipi) + return; + + send_arch_ipi = func; + ipi_irq = irq; +} + +static void +send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + for_each_cpu(i, to_whom) + set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits); + + smp_mb(); + send_arch_ipi(to_whom); +} + +static const char * const ipi_names[] = { + [IPI_EMPTY] = "Empty interrupts", + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_IRQ_WORK] = "Irq work interrupts", +}; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < IPI_MAX; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, + prec >= 4 ? 
" " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10lu ", + per_cpu_ptr(&ipi_data, cpu)->stats[i]); + seq_printf(p, " %s\n", ipi_names[i]); + } + + return 0; +} + +void arch_send_call_function_ipi_mask(struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_stop(void *unused) +{ + while (1); +} + +void smp_send_stop(void) +{ + on_each_cpu(ipi_stop, NULL, 1); +} + +void arch_smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); +} +#endif + +void __init smp_prepare_boot_cpu(void) +{ +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ +} + +static int ipi_dummy_dev; + +void __init setup_smp_ipi(void) +{ + int rc; + + if (ipi_irq == 0) + return; + + rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", + &ipi_dummy_dev); + if (rc) + panic("%s IRQ request failed\n", __func__); + + enable_percpu_irq(ipi_irq, 0); +} + +void __init setup_smp(void) +{ + struct device_node *node = NULL; + unsigned int cpu; + + for_each_of_cpu_node(node) { + if (!of_device_is_available(node)) + continue; + + cpu = of_get_cpu_hwid(node, 0); + if (cpu >= NR_CPUS) + continue; + + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + } +} + +extern void _start_smp_secondary(void); + +volatile unsigned int secondary_hint; +volatile unsigned int secondary_hint2; +volatile unsigned int secondary_ccr; +volatile unsigned int secondary_stack; +volatile unsigned int secondary_msa1; +volatile unsigned int secondary_pgd; + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + unsigned long mask = 1 << cpu; + + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; + secondary_hint = mfcr("cr31"); + secondary_hint2 = mfcr("cr<21, 1>"); + secondary_ccr = mfcr("cr18"); + secondary_msa1 = read_mmu_msa1(); + secondary_pgd = mfcr("cr<29, 15>"); + + /* + * Because other CPUs are in reset status, we must flush data + * from cache to out and secondary CPUs use them in + * csky_start_secondary(void) + */ + mtcr("cr17", 0x22); + + if (mask & mfcr("cr<29, 0>")) { + send_arch_ipi(cpumask_of(cpu)); + } else { + /* Enable cpu in SMP reset ctrl reg */ + mask |= mfcr("cr<29, 0>"); + mtcr("cr<29, 0>", mask); + } + + /* Wait for the cpu online */ + while (!cpu_online(cpu)); + + secondary_stack = 0; + + return 0; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +void csky_start_secondary(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + mtcr("cr31", secondary_hint); + mtcr("cr<21, 1>", secondary_hint2); + mtcr("cr18", secondary_ccr); + + mtcr("vbr", vec_base); + + flush_tlb_all(); + write_mmu_pagemask(0); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + + enable_percpu_irq(ipi_irq, 0); + + mmget(mm); + mmgrab(mm); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + + notify_cpu_starting(cpu); + set_cpu_online(cpu, true); + + pr_info("CPU%u Online: %s...\n", cpu, __func__); + + local_irq_enable(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + + irq_migrate_all_off_this_cpu(); + + clear_tasks_mm_cpumask(cpu); + + return 0; +} + +void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) +{ + 
pr_notice("CPU%u: shutdown\n", cpu); +} + +void __noreturn arch_cpu_idle_dead(void) +{ + idle_task_exit(); + + cpuhp_ap_report_dead(); + + while (!secondary_stack) + arch_cpu_idle(); + + raw_local_irq_disable(); + + asm volatile( + "mov sp, %0\n" + "mov r8, %0\n" + "jmpi csky_start_secondary" + : + : "r" (secondary_stack)); + + BUG(); +} +#endif diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c new file mode 100644 index 0000000000..27ecd63e32 --- /dev/null +++ b/arch/csky/kernel/stacktrace.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include + +#ifdef CONFIG_FRAME_POINTER + +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long fp, sp, pc; + + if (regs) { + fp = frame_pointer(regs); + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + const register unsigned long current_fp __asm__ ("r8"); + fp = current_fp; + sp = current_stack_pointer; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + fp = thread_saved_fp(task); + sp = thread_saved_sp(task); + pc = thread_saved_lr(task); + } + + for (;;) { + unsigned long low, high; + struct stackframe *frame; + + if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) + break; + + /* Validate frame pointer */ + low = sp; + high = ALIGN(sp, THREAD_SIZE); + if (unlikely(fp < low || fp > high || fp & 0x3)) + break; + /* Unwind stack frame */ + frame = (struct stackframe *)fp; + sp = fp; + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, + (unsigned long *)(fp - 8)); + } +} + +#else /* !CONFIG_FRAME_POINTER */ + +static void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long sp, pc; + unsigned long *ksp; + + if (regs) { + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + sp = current_stack_pointer; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + sp = thread_saved_sp(task); + pc = thread_saved_lr(task); + } + + if (unlikely(sp & 0x3)) + return; + + ksp = (unsigned long *)sp; + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) + break; + pc = (*ksp++) - 0x4; + } +} +#endif /* CONFIG_FRAME_POINTER */ + +static bool print_trace_address(unsigned long pc, void *arg) +{ + print_ip_sym((const char *)arg, pc); + return false; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_cont("Call Trace:\n"); + walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); +} + +static bool save_wchan(unsigned long pc, void *arg) +{ + if (!in_sched_functions(pc)) { + unsigned long *p = arg; + *p = pc; + return true; + } + return false; +} + +unsigned long __get_wchan(struct task_struct *task) +{ + unsigned long pc = 0; + + walk_stackframe(task, NULL, save_wchan, &pc); + return pc; +} + +#ifdef CONFIG_STACKTRACE +static bool __save_trace(unsigned long pc, void *arg, bool nosched) +{ + struct stack_trace *trace = arg; + + if (unlikely(nosched && in_sched_functions(pc))) + return false; + if (unlikely(trace->skip > 0)) { + trace->skip--; + return false; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); 
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+	return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	walk_stackframe(tsk, NULL, save_trace, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c
new file mode 100644
index 0000000000..3d30e58a45
--- /dev/null
+++ b/arch/csky/kernel/syscall.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include
+
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
+{
+	struct thread_info *ti = task_thread_info(current);
+	struct pt_regs *reg = current_pt_regs();
+
+	reg->tls = addr;
+	ti->tp_value = addr;
+
+	return 0;
+}
+
+SYSCALL_DEFINE6(mmap2,
+	unsigned long, addr,
+	unsigned long, len,
+	unsigned long, prot,
+	unsigned long, flags,
+	unsigned long, fd,
+	off_t, offset)
+{
+	if (unlikely(offset & (~PAGE_MASK >> 12)))
+		return -EINVAL;
+
+	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+			       offset >> (PAGE_SHIFT - 12));
+}
+
+/*
+ * For abiv1, a 64-bit argument must sit in an even/odd register pair,
+ * so the advice argument is moved ahead of the 64-bit offset and len.
+ */
+SYSCALL_DEFINE4(csky_fadvise64_64,
+	int, fd,
+	int, advice,
+	loff_t, offset,
+	loff_t, len)
+{
+	return ksys_fadvise64_64(fd, offset, len, advice);
+}
diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c
new file mode 100644
index 0000000000..a0c238c537
--- /dev/null
+++ b/arch/csky/kernel/syscall_table.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include
+#include
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call)[nr] = (call),
+
+#define sys_fadvise64_64 sys_csky_fadvise64_64
+void * const sys_call_table[__NR_syscalls] __page_aligned_data = {
+	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include
+};
diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c
new file mode 100644
index 0000000000..52379d866f
--- /dev/null
+++ b/arch/csky/kernel/time.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include
+#include
+
+void __init time_init(void)
+{
+	of_clk_init(NULL);
+	timer_probe();
+}
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
new file mode 100644
index 0000000000..6e426fba01
--- /dev/null
+++ b/arch/csky/kernel/traps.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
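+/*
+ * Worked example for the mmap2() unit conversion in syscall.c above
+ * (illustrative numbers, assuming 16 KiB pages, i.e. PAGE_SHIFT == 14):
+ * mmap2 takes its file offset in fixed 4096-byte units, so
+ * ~PAGE_MASK >> 12 == (16384 - 1) >> 12 == 3 rejects any offset that
+ * is not a multiple of four such units (i.e. not 16 KiB aligned), and
+ * offset >> (PAGE_SHIFT - 12) == offset >> 2 turns the accepted
+ * offsets into 16 KiB page numbers for ksys_mmap_pgoff(). With 4 KiB
+ * pages both expressions collapse to "no check" and "no shift".
+ */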
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_CPU_HAS_FPU +#include +#endif + +int show_unhandled_signals = 1; + +/* Defined in entry.S */ +asmlinkage void csky_trap(void); + +asmlinkage void csky_systemcall(void); +asmlinkage void csky_cmpxchg(void); +asmlinkage void csky_get_tls(void); +asmlinkage void csky_irq(void); + +asmlinkage void csky_pagefault(void); + +/* Defined in head.S */ +asmlinkage void _start_smp_secondary(void); + +void __init pre_trap_init(void) +{ + int i; + + mtcr("vbr", vec_base); + + for (i = 1; i < 128; i++) + VEC_INIT(i, csky_trap); +} + +void __init trap_init(void) +{ + VEC_INIT(VEC_AUTOVEC, csky_irq); + + /* setup trap0 trap2 trap3 */ + VEC_INIT(VEC_TRAP0, csky_systemcall); + VEC_INIT(VEC_TRAP2, csky_cmpxchg); + VEC_INIT(VEC_TRAP3, csky_get_tls); + + /* setup MMU TLB exception */ + VEC_INIT(VEC_TLBINVALIDL, csky_pagefault); + VEC_INIT(VEC_TLBINVALIDS, csky_pagefault); + VEC_INIT(VEC_TLBMODIFIED, csky_pagefault); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + +#ifdef CONFIG_SMP + mtcr("cr<28, 0>", virt_to_phys(vec_base)); + + VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary)); +#endif +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + int ret; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + print_modules(); + show_regs(regs); + show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO); + + ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + oops_exit(); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) +{ + struct task_struct *tsk = current; + + if (show_unhandled_signals && unhandled_signal(tsk, signo) + && printk_ratelimit()) { + pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx", + tsk->comm, task_pid_nr(tsk), signo, code, addr); + print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); + pr_cont("\n"); + show_regs(regs); + } + + force_sig_fault(signo, code, (void __user *)addr); +} + +static void do_trap_error(struct pt_regs *regs, int signo, int code, + unsigned long addr, const char *str) +{ + current->thread.trap_no = trap_no(regs); + + if (user_mode(regs)) { + do_trap(regs, signo, code, addr); + } else { + if (!fixup_exception(regs)) + die(regs, str); + } +} + +#define DO_ERROR_INFO(name, signo, code, str) \ +asmlinkage __visible void name(struct pt_regs *regs) \ +{ \ + do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \ +} + +DO_ERROR_INFO(do_trap_unknown, + SIGILL, ILL_ILLTRP, "unknown exception"); +DO_ERROR_INFO(do_trap_zdiv, + SIGFPE, FPE_INTDIV, "error zero div exception"); +DO_ERROR_INFO(do_trap_buserr, + SIGSEGV, ILL_ILLADR, "error bus error exception"); + +asmlinkage void do_trap_misaligned(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_NEED_SOFTALIGN + csky_alignment(regs); +#else + current->thread.trap_no = trap_no(regs); + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc, + "Oops - load/store address misaligned"); +#endif +} + +asmlinkage void 
do_trap_bkpt(struct pt_regs *regs) +{ +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_single_step_handler(regs)) + return; +#endif + if (user_mode(regs)) { + send_sig(SIGTRAP, current, 0); + return; + } + + do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc, + "Oops - illegal trap exception"); +} + +asmlinkage void do_trap_illinsn(struct pt_regs *regs) +{ + current->thread.trap_no = trap_no(regs); + +#ifdef CONFIG_KPROBES + if (kprobe_breakpoint_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_breakpoint_handler(regs)) + return; +#endif +#ifndef CONFIG_CPU_NO_USER_BKPT + if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) { + send_sig(SIGTRAP, current, 0); + return; + } +#endif + + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - illegal instruction exception"); +} + +asmlinkage void do_trap_fpe(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + return fpu_fpe(regs); +#else + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - fpu instruction exception"); +#endif +} + +asmlinkage void do_trap_priv(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + if (user_mode(regs) && fpu_libc_helper(regs)) + return; +#endif + do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc, + "Oops - illegal privileged exception"); +} + +asmlinkage void trap_c(struct pt_regs *regs) +{ + switch (trap_no(regs)) { + case VEC_ZERODIV: + do_trap_zdiv(regs); + break; + case VEC_TRACE: + do_trap_bkpt(regs); + break; + case VEC_ILLEGAL: + do_trap_illinsn(regs); + break; + case VEC_TRAP1: + case VEC_BREAKPOINT: + do_trap_bkpt(regs); + break; + case VEC_ACCESS: + do_trap_buserr(regs); + break; + case VEC_ALIGN: + do_trap_misaligned(regs); + break; + case VEC_FPE: + do_trap_fpe(regs); + break; + case VEC_PRIV: + do_trap_priv(regs); + break; + default: + do_trap_unknown(regs); + break; + } +} diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c new file mode 100644 index 0000000000..16c20d64d1 --- /dev/null +++ b/arch/csky/kernel/vdso.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include + +#include +#ifdef GENERIC_TIME_VSYSCALL +#include +#else +#include +#endif + +extern char vdso_start[], vdso_end[]; + +static unsigned int vdso_pages; +static struct page **vdso_pagelist; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; + +static int __init vdso_init(void) +{ + unsigned int i; + + vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; + vdso_pagelist = + kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL); + if (unlikely(vdso_pagelist == NULL)) { + pr_err("vdso: pagelist allocation failed\n"); + return -ENOMEM; + } + + for (i = 0; i < vdso_pages; i++) { + struct page *pg; + + pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); + vdso_pagelist[i] = pg; + } + vdso_pagelist[i] = virt_to_page(vdso_data); + + return 0; +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_len; + int ret; + + vdso_len = (vdso_pages + 1) << PAGE_SHIFT; + + mmap_write_lock(mm); + vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = vdso_base; + goto end; + } + + /* + * Put vDSO base into mm struct. 
We need to do this before calling + * install_special_mapping or the perf counter mmap tracking code + * will fail to recognise it as a vDSO (since arch_vma_name fails). + */ + mm->context.vdso = (void *)vdso_base; + + ret = + install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, + (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), + vdso_pagelist); + + if (unlikely(ret)) { + mm->context.vdso = NULL; + goto end; + } + + vdso_base += (vdso_pages << PAGE_SHIFT); + ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, + (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]); + + if (unlikely(ret)) + mm->context.vdso = NULL; +end: + mmap_write_unlock(mm); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso)) + return "[vdso]"; + if (vma->vm_mm && (vma->vm_start == + (long)vma->vm_mm->context.vdso + PAGE_SIZE)) + return "[vdso_data]"; + return NULL; +} diff --git a/arch/csky/kernel/vdso/.gitignore b/arch/csky/kernel/vdso/.gitignore new file mode 100644 index 0000000000..3a19def868 --- /dev/null +++ b/arch/csky/kernel/vdso/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +vdso.lds +*.tmp +vdso-syms.S diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile new file mode 100644 index 0000000000..299e4e41eb --- /dev/null +++ b/arch/csky/kernel/vdso/Makefile @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile + +# Symbols present in the vdso +vdso-syms += rt_sigreturn +vdso-syms += vgettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) +endif + +ccflags-y := -fno-stack-protector -DBUILD_VDSO32 + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n +KCOV_INSTRUMENT := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. 
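+# For example (offsets illustrative only): an `$(NM) -D` output line
+# such as
+#   00000800 T __vdso_rt_sigreturn@@LINUX_5.10
+# is rewritten by so2s.sh into
+#   .global __vdso_rt_sigreturn
+#   .set __vdso_rt_sigreturn,0x00000800
+# so the generated vdso-syms.S exposes the same symbol at the same
+# offset to the kernel link.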
+quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/csky/kernel/vdso/note.S b/arch/csky/kernel/vdso/note.S new file mode 100644 index 0000000000..2a956c9422 --- /dev/null +++ b/arch/csky/kernel/vdso/note.S @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END diff --git a/arch/csky/kernel/vdso/rt_sigreturn.S b/arch/csky/kernel/vdso/rt_sigreturn.S new file mode 100644 index 0000000000..0a6bd12161 --- /dev/null +++ b/arch/csky/kernel/vdso/rt_sigreturn.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include +#include +#include + + .text +ENTRY(__vdso_rt_sigreturn) + .cfi_startproc + .cfi_signal_frame + SET_SYSCALL_ID + trap 0 + .cfi_endproc +ENDPROC(__vdso_rt_sigreturn) diff --git a/arch/csky/kernel/vdso/so2s.sh b/arch/csky/kernel/vdso/so2s.sh new file mode 100755 index 0000000000..69da3d529c --- /dev/null +++ b/arch/csky/kernel/vdso/so2s.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_5.10\)*!.global \2\n.set \2,0x\1!' \ +| grep '^\.' diff --git a/arch/csky/kernel/vdso/vdso.S b/arch/csky/kernel/vdso/vdso.S new file mode 100644 index 0000000000..5162ca0694 --- /dev/null +++ b/arch/csky/kernel/vdso/vdso.S @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/csky/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S new file mode 100644 index 0000000000..590a6c79ff --- /dev/null +++ b/arch/csky/kernel/vdso/vdso.lds.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include + +OUTPUT_ARCH(csky) + +SECTIONS +{ + PROVIDE(_vdso_data = . + PAGE_SIZE); + . = SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + .dynamic : { *(.dynamic) } :text :dynamic + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + + . 
= 0x800; + .text : { *(.text .text.*) } :text + + .data : { + *(.got.plt) *(.got) + *(.data .data.* .gnu.linkonce.d.*) + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + } +} + +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +VERSION +{ + LINUX_5.10 { + global: + __vdso_rt_sigreturn; + __vdso_clock_gettime; + __vdso_clock_gettime64; + __vdso_gettimeofday; + __vdso_clock_getres; + local: *; + }; +} diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c new file mode 100644 index 0000000000..c4831145ee --- /dev/null +++ b/arch/csky/kernel/vdso/vgettimeofday.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +extern +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts); +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts); +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +extern +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz); +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +extern +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res); +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock_id, res); +} diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S new file mode 100644 index 0000000000..d718961786 --- /dev/null +++ b/arch/csky/kernel/vmlinux.lds.S @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include + +OUTPUT_ARCH(csky) +ENTRY(_start) + +#ifndef __cskyBE__ +jiffies = jiffies_64; +#else +jiffies = jiffies_64 + 4; +#endif + +#define VBR_BASE \ + . = ALIGN(1024); \ + vec_base = .; \ + . += 512; + +SECTIONS +{ + . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; + + _start = .; + HEAD_TEXT_SECTION + . = ALIGN(PAGE_SIZE); + + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = .; + _stext = .; + VBR_BASE + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } = 0 + _etext = .; + + /* __init_begin __init_end must be page aligned for free_initmem */ + . = ALIGN(PAGE_SIZE); + __init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(PAGE_SIZE) + PERCPU_SECTION(L1_CACHE_BYTES) + . = ALIGN(PAGE_SIZE); + __init_end = .; + + _sdata = .; + RO_DATA(PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + +#ifdef CONFIG_HAVE_TCM + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + } + + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) + { + . = ALIGN(4); + __stcm_text_data = .; + *(.tcm.text) + *(.tcm.rodata) +#ifndef CONFIG_HAVE_DTCM + *(.tcm.data) +#endif + . = ALIGN(4); + __etcm_text_data = .; + } + + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); + +#ifdef CONFIG_HAVE_DTCM + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE + + .dtcm_start : { + __dtcm_start = .; + } + + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) + { + . = ALIGN(4); + __stcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __etcm_data = .; + } + + . 
= ADDR(.dtcm_start) + SIZEOF(.data_tcm); + + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { +#else + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { +#endif + . = ALIGN(PAGE_SIZE); + __tcm_end = .; + } +#endif + + EXCEPTION_TABLE(L1_CACHE_BYTES) + BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/csky/lib/Makefile b/arch/csky/lib/Makefile new file mode 100644 index 0000000000..d0ce6e2d7a --- /dev/null +++ b/arch/csky/lib/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +lib-y := usercopy.o delay.o +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS), y) +lib-y += string.o +endif diff --git a/arch/csky/lib/delay.c b/arch/csky/lib/delay.c new file mode 100644 index 0000000000..f5db317313 --- /dev/null +++ b/arch/csky/lib/delay.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#include +#include +#include +#include + +void __aligned(8) __delay(unsigned long loops) +{ + asm volatile ( + "mov r0, r0\n" + "1:declt %0\n" + "bf 1b" + : "=r"(loops) + : "0"(loops)); +} +EXPORT_SYMBOL(__delay); + +void __const_udelay(unsigned long xloops) +{ + unsigned long long loops; + + loops = (unsigned long long)xloops * loops_per_jiffy * HZ; + + __delay(loops >> 32); +} +EXPORT_SYMBOL(__const_udelay); + +void __udelay(unsigned long usecs) +{ + __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long nsecs) +{ + __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/csky/lib/error-inject.c b/arch/csky/lib/error-inject.c new file mode 100644 index 0000000000..c15fb36fe0 --- /dev/null +++ b/arch/csky/lib/error-inject.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void override_function_with_return(struct pt_regs *regs) +{ + instruction_pointer_set(regs, regs->lr); +} +NOKPROBE_SYMBOL(override_function_with_return); diff --git a/arch/csky/lib/string.c b/arch/csky/lib/string.c new file mode 100644 index 0000000000..d65626fcae --- /dev/null +++ b/arch/csky/lib/string.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * String functions optimized for hardware which doesn't + * handle unaligned memory accesses efficiently. + * + * Copyright (C) 2021 Matteo Croce + */ + +#include +#include + +/* Minimum size for a word copy to be convenient */ +#define BYTES_LONG sizeof(long) +#define WORD_MASK (BYTES_LONG - 1) +#define MIN_THRESHOLD (BYTES_LONG * 2) + +/* convenience union to avoid cast between different pointer types */ +union types { + u8 *as_u8; + unsigned long *as_ulong; + uintptr_t as_uptr; +}; + +union const_types { + const u8 *as_u8; + unsigned long *as_ulong; + uintptr_t as_uptr; +}; + +void *memcpy(void *dest, const void *src, size_t count) +{ + union const_types s = { .as_u8 = src }; + union types d = { .as_u8 = dest }; + int distance = 0; + + if (count < MIN_THRESHOLD) + goto copy_remainder; + + /* Copy a byte at time until destination is aligned. */ + for (; d.as_uptr & WORD_MASK; count--) + *d.as_u8++ = *s.as_u8++; + + distance = s.as_uptr & WORD_MASK; + + if (distance) { + unsigned long last, next; + + /* + * s is distance bytes ahead of d, and d just reached + * the alignment boundary. 
Move s backward to word align it
+	 * and shift data to compensate for distance, in order to do
+	 * word-by-word copy.
+	 */
+		s.as_u8 -= distance;
+
+		next = s.as_ulong[0];
+		for (; count >= BYTES_LONG; count -= BYTES_LONG) {
+			last = next;
+			next = s.as_ulong[1];
+
+			d.as_ulong[0] = last >> (distance * 8) |
+					next << ((BYTES_LONG - distance) * 8);
+
+			d.as_ulong++;
+			s.as_ulong++;
+		}
+
+		/* Restore s with the original offset. */
+		s.as_u8 += distance;
+	} else {
+		/*
+		 * If the source and dest lower bits are the same, do a simple
+		 * 32/64 bit wide copy.
+		 */
+		for (; count >= BYTES_LONG; count -= BYTES_LONG)
+			*d.as_ulong++ = *s.as_ulong++;
+	}
+
+copy_remainder:
+	while (count--)
+		*d.as_u8++ = *s.as_u8++;
+
+	return dest;
+}
+EXPORT_SYMBOL(memcpy);
+
+/*
+ * Check whether the buffers overlap and, if they do not, simply call
+ * memcpy(); otherwise fall back to a byte-at-a-time backward copy.
+ */
+void *memmove(void *dest, const void *src, size_t count)
+{
+	if (dest < src || src + count <= dest)
+		return memcpy(dest, src, count);
+
+	if (dest > src) {
+		const char *s = src + count;
+		char *tmp = dest + count;
+
+		while (count--)
+			*--tmp = *--s;
+	}
+	return dest;
+}
+EXPORT_SYMBOL(memmove);
+
+void *memset(void *s, int c, size_t count)
+{
+	union types dest = { .as_u8 = s };
+
+	if (count >= MIN_THRESHOLD) {
+		unsigned long cu = (unsigned long)c;
+
+		/* Compose an unsigned long with 'c' repeated 4/8 times */
+		cu |= cu << 8;
+		cu |= cu << 16;
+		/* Suppress warning on 32-bit machines */
+		cu |= (cu << 16) << 16;
+
+		for (; count && dest.as_uptr & WORD_MASK; count--)
+			*dest.as_u8++ = c;
+
+		/* Copy using the largest size allowed */
+		for (; count >= BYTES_LONG; count -= BYTES_LONG)
+			*dest.as_ulong++ = cu;
+	}
+
+	/* copy the remainder */
+	while (count--)
+		*dest.as_u8++ = c;
+
+	return s;
+}
+EXPORT_SYMBOL(memset);
diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c
new file mode 100644
index 0000000000..3c01c54421
--- /dev/null
+++ b/arch/csky/lib/usercopy.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
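+/*
+ * Worked example for the misaligned-copy loop in lib/string.c above
+ * (assuming a 32-bit little-endian machine and distance == 1): after
+ * "s.as_u8 -= distance", last holds the source bytes at offsets
+ * {-1, 0, 1, 2} and next holds {3, 4, 5, 6}. Then last >> 8 keeps
+ * bytes {0, 1, 2} in the low three lanes, next << 24 places byte 3 in
+ * the top lane, and their OR stores bytes {0, 1, 2, 3} with a single
+ * word-aligned write -- one aligned load/store pair per word copied,
+ * instead of four byte accesses.
+ */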
+ +#include +#include + +unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n) +{ + int tmp, nsave; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" + " bt 3f \n" + "2: ldw %3, (%2, 0) \n" + "10: ldw %4, (%2, 4) \n" + " stw %3, (%1, 0) \n" + " stw %4, (%1, 4) \n" + "11: ldw %3, (%2, 8) \n" + "12: ldw %4, (%2, 12) \n" + " stw %3, (%1, 8) \n" + " stw %4, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" + " bt 5f \n" + "4: ldw %3, (%2, 0) \n" + " stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" + " bf 7f \n" + "6: ldb %3, (%2, 0) \n" + " stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + "8: stw %3, (%1, 0) \n" + " subi %0, 4 \n" + " bf 7f \n" + "9: subi %0, 8 \n" + " bf 7f \n" + "13: stw %3, (%1, 8) \n" + " subi %0, 12 \n" + " bf 7f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 7f \n" + ".long 4b, 7f \n" + ".long 6b, 7f \n" + ".long 10b, 8b \n" + ".long 11b, 9b \n" + ".long 12b,13b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), + "=r"(tmp) + : "0"(n), "1"(to), "2"(from) + : "memory"); + + return n; +} +EXPORT_SYMBOL(raw_copy_from_user); + +unsigned long raw_copy_to_user(void *to, const void *from, + unsigned long n) +{ + int w0, w1, w2, w3; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 8f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" /* 4W */ + " bt 3f \n" + " ldw %3, (%2, 0) \n" + " ldw %4, (%2, 4) \n" + " ldw %5, (%2, 8) \n" + " ldw %6, (%2, 12) \n" + "2: stw %3, (%1, 0) \n" + "9: stw %4, (%1, 4) \n" + "10: stw %5, (%1, 8) \n" + "11: stw %6, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + " ldw %3, (%2, 0) \n" + "4: stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + " bf 13f \n" + " ldb %3, (%2, 0) \n" + "6: stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + "7: subi %0, 4 \n" + "8: subi %0, 4 \n" + "12: subi %0, 4 \n" + " br 13f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 13f \n" + ".long 4b, 13f \n" + ".long 6b, 13f \n" + ".long 9b, 12b \n" + ".long 10b, 8b \n" + ".long 11b, 7b \n" + ".previous \n" + "13: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), + "=r"(w1), "=r"(w2), "=r"(w3) + : "0"(n), "1"(to), "2"(from) + : "memory"); + + return n; +} +EXPORT_SYMBOL(raw_copy_to_user); + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. 
+ */ +unsigned long +__clear_user(void __user *to, unsigned long n) +{ + int data, value, tmp; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 32 \n" /* 4W */ + " bt 3f \n" + "8: stw %2, (%1, 0) \n" + "10: stw %2, (%1, 4) \n" + "11: stw %2, (%1, 8) \n" + "12: stw %2, (%1, 12) \n" + "13: stw %2, (%1, 16) \n" + "14: stw %2, (%1, 20) \n" + "15: stw %2, (%1, 24) \n" + "16: stw %2, (%1, 28) \n" + " addi %1, 32 \n" + " subi %0, 32 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + "4: stw %2, (%1, 0) \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + "9: bf 7f \n" + "6: stb %2, (%1, 0) \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + ".section __ex_table,\"a\" \n" + ".align 2 \n" + ".long 8b, 9b \n" + ".long 10b, 9b \n" + ".long 11b, 9b \n" + ".long 12b, 9b \n" + ".long 13b, 9b \n" + ".long 14b, 9b \n" + ".long 15b, 9b \n" + ".long 16b, 9b \n" + ".long 4b, 9b \n" + ".long 6b, 9b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r" (data), "=r"(value), "=r"(tmp) + : "0"(n), "1"(to), "2"(0) + : "memory"); + + return n; +} +EXPORT_SYMBOL(__clear_user); diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile new file mode 100644 index 0000000000..6e7696e55f --- /dev/null +++ b/arch/csky/mm/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) +obj-y += cachev2.o +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) +else +obj-y += cachev1.o +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) +endif + +obj-y += dma-mapping.o +obj-y += fault.o +obj-$(CONFIG_HIGHMEM) += highmem.o +obj-y += init.o +obj-y += ioremap.o +obj-y += syscache.o +obj-y += tlb.o +obj-y += asid.o +obj-y += context.o +obj-$(CONFIG_HAVE_TCM) += tcm.o diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c new file mode 100644 index 0000000000..7fb6c417bb --- /dev/null +++ b/arch/csky/mm/asid.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic ASID allocator. + * + * Based on arch/arm/mm/context.c + * + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include + +#include + +#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu) + +#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0)) +#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits)) + +#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift) +#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info)) + +static void flush_context(struct asid_info *info) +{ + int i; + u64 asid; + + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_zero(info->map, NUM_CTXT_ASIDS(info)); + + for_each_possible_cpu(i) { + asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. 
+ */ + if (asid == 0) + asid = reserved_asid(info, i); + __set_bit(asid2idx(info, asid), info->map); + reserved_asid(info, i) = asid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&info->flush_pending); +} + +static bool check_update_reserved_asid(struct asid_info *info, u64 asid, + u64 newasid) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (reserved_asid(info, cpu) == asid) { + hit = true; + reserved_asid(info, cpu) = newasid; + } + } + + return hit; +} + +static u64 new_context(struct asid_info *info, atomic64_t *pasid, + struct mm_struct *mm) +{ + static u32 cur_idx = 1; + u64 asid = atomic64_read(pasid); + u64 generation = atomic64_read(&info->generation); + + if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK(info)); + + /* + * If our current ASID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_asid(info, asid, newasid)) + return newasid; + + /* + * We had a valid ASID in a previous life, so try to re-use + * it if possible. + */ + if (!__test_and_set_bit(asid2idx(info, asid), info->map)) + return newasid; + } + + /* + * Allocate a free ASID. If we can't find one, take a note of the + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. + */ + asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx); + if (asid != NUM_CTXT_ASIDS(info)) + goto set_asid; + + /* We're out of ASIDs, so increment the global generation count */ + generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info), + &info->generation); + flush_context(info); + + /* We have more ASIDs than CPUs, so this will always succeed */ + asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1); + +set_asid: + __set_bit(asid, info->map); + cur_idx = asid; + cpumask_clear(mm_cpumask(mm)); + return idx2asid(info, asid) | generation; +} + +/* + * Generate a new ASID for the context. + * + * @pasid: Pointer to the current ASID batch allocated. It will be updated + * with the new ASID batch. + * @cpu: current CPU ID. Must have been acquired through get_cpu() + */ +void asid_new_context(struct asid_info *info, atomic64_t *pasid, + unsigned int cpu, struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + raw_spin_lock_irqsave(&info->lock, flags); + /* Check that our ASID belongs to the current generation. 
*/ + asid = atomic64_read(pasid); + if ((asid ^ atomic64_read(&info->generation)) >> info->bits) { + asid = new_context(info, pasid, mm); + atomic64_set(pasid, asid); + } + + if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending)) + info->flush_cpu_ctxt_cb(); + + atomic64_set(&active_asid(info, cpu), asid); + cpumask_set_cpu(cpu, mm_cpumask(mm)); + raw_spin_unlock_irqrestore(&info->lock, flags); +} + +/* + * Initialize the ASID allocator + * + * @info: Pointer to the asid allocator structure + * @bits: Number of ASIDs available + * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are + * allocated contiguously for a given context. This value should be a power of + * 2. + */ +int asid_allocator_init(struct asid_info *info, + u32 bits, unsigned int asid_per_ctxt, + void (*flush_cpu_ctxt_cb)(void)) +{ + info->bits = bits; + info->ctxt_shift = ilog2(asid_per_ctxt); + info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb; + /* + * Expect allocation after rollover to fail if we don't have at least + * one more ASID than CPUs. ASID #0 is always reserved. + */ + WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus()); + atomic64_set(&info->generation, ASID_FIRST_VERSION(info)); + info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL); + if (!info->map) + return -ENOMEM; + + raw_spin_lock_init(&info->lock); + + return 0; +} diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c new file mode 100644 index 0000000000..5a5a9804a0 --- /dev/null +++ b/arch/csky/mm/cachev1.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include + +/* for L1-cache */ +#define INS_CACHE (1 << 0) +#define DATA_CACHE (1 << 1) +#define CACHE_INV (1 << 4) +#define CACHE_CLR (1 << 5) +#define CACHE_OMS (1 << 6) +#define CACHE_ITS (1 << 7) +#define CACHE_LICF (1 << 31) + +/* for L2-cache */ +#define CR22_LEVEL_SHIFT (1) +#define CR22_SET_SHIFT (7) +#define CR22_WAY_SHIFT (30) +#define CR22_WAY_SHIFT_L2 (29) + +static DEFINE_SPINLOCK(cache_lock); + +static inline void cache_op_line(unsigned long i, unsigned int val) +{ + mtcr("cr22", i); + mtcr("cr17", val); +} + +#define CCR2_L2E (1 << 3) +static void cache_op_all(unsigned int value, unsigned int l2) +{ + mtcr("cr17", value | CACHE_CLR); + mb(); + + if (l2 && (mfcr_ccr2() & CCR2_L2E)) { + mtcr("cr24", value | CACHE_CLR); + mb(); + } +} + +static void cache_op_range( + unsigned int start, + unsigned int end, + unsigned int value, + unsigned int l2) +{ + unsigned long i, flags; + unsigned int val = value | CACHE_CLR | CACHE_OMS; + bool l2_sync; + + if (unlikely((end - start) >= PAGE_SIZE) || + unlikely(start < PAGE_OFFSET) || + unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { + cache_op_all(value, l2); + return; + } + + if ((mfcr_ccr2() & CCR2_L2E) && l2) + l2_sync = 1; + else + l2_sync = 0; + + spin_lock_irqsave(&cache_lock, flags); + + i = start & ~(L1_CACHE_BYTES - 1); + for (; i < end; i += L1_CACHE_BYTES) { + cache_op_line(i, val); + if (l2_sync) { + mb(); + mtcr("cr24", val); + } + } + spin_unlock_irqrestore(&cache_lock, flags); + + mb(); +} + +void dcache_wb_line(unsigned long start) +{ + asm volatile("idly4\n":::"memory"); + cache_op_line(start, DATA_CACHE|CACHE_CLR); + mb(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); +} + +void icache_inv_all(void) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + +void local_icache_inv_all(void *priv) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + 
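+/*
+ * Worked example for the generation check in asid_new_context() in
+ * mm/asid.c above (assuming info->bits == 8, so the ASID occupies
+ * bits [7:0] and the generation lives above them): with the global
+ * generation at 0x200, a cached value of 0x105 gives
+ * (0x105 ^ 0x200) >> 8 == 0x3, non-zero, so the context belongs to an
+ * old rollover and new_context() must allocate a fresh ASID; a cached
+ * 0x205 gives (0x205 ^ 0x200) >> 8 == 0, so the ASID is still current
+ * and the fast path keeps it.
+ */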
+void dcache_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); +} + +void dcache_wbinv_all(void) +{ + cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void cache_wbinv_all(void) +{ + cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} + +void dma_inv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} + +void dma_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c new file mode 100644 index 0000000000..7a9664adce --- /dev/null +++ b/arch/csky/mm/cachev2.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include +#include + +/* for L1-cache */ +#define INS_CACHE (1 << 0) +#define DATA_CACHE (1 << 1) +#define CACHE_INV (1 << 4) +#define CACHE_CLR (1 << 5) +#define CACHE_OMS (1 << 6) + +void local_icache_inv_all(void *priv) +{ + mtcr("cr17", INS_CACHE|CACHE_INV); + sync_is(); +} + +#ifdef CONFIG_CPU_HAS_ICACHE_INS +void icache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("icache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} +#else +struct cache_range { + unsigned long start; + unsigned long end; +}; + +static DEFINE_SPINLOCK(cache_lock); + +static inline void cache_op_line(unsigned long i, unsigned int val) +{ + mtcr("cr22", i); + mtcr("cr17", val); +} + +void local_icache_inv_range(void *priv) +{ + struct cache_range *param = priv; + unsigned long i = param->start & ~(L1_CACHE_BYTES - 1); + unsigned long flags; + + spin_lock_irqsave(&cache_lock, flags); + + for (; i < param->end; i += L1_CACHE_BYTES) + cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS); + + spin_unlock_irqrestore(&cache_lock, flags); + + sync_is(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + struct cache_range param = { start, end }; + + if (irqs_disabled()) + local_icache_inv_range(¶m); + else + on_each_cpu(local_icache_inv_range, ¶m, 1); +} +#endif + +inline void dcache_wb_line(unsigned long start) +{ + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); + sync_is(); +} + +void dcache_wb_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + sync_is(); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + dcache_wb_range(start, end); + icache_inv_range(start, end); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + sync_is(); +} + +void dma_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} + 
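+/*
+ * Worked example for the align-down loops above (assuming
+ * L1_CACHE_BYTES == 32): for start == 0x1045 and end == 0x1085,
+ * start & ~(32 - 1) == 0x1040, and the loop issues line operations at
+ * 0x1040, 0x1060 and 0x1080 -- three cache-line operations that cover
+ * every byte of [start, end) even though neither endpoint is line
+ * aligned.
+ */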
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
+	sync_is();
+}
diff --git a/arch/csky/mm/context.c b/arch/csky/mm/context.c
new file mode 100644
index 0000000000..0d95bdd938
--- /dev/null
+++ b/arch/csky/mm/context.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/asid.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+
+struct asid_info asid_info;
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
+}
+
+static void asid_flush_cpu_ctxt(void)
+{
+	local_tlb_invalid_all();
+}
+
+static int asids_init(void)
+{
+	BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
+
+	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
+				asid_flush_cpu_ctxt))
+		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+		      NUM_ASIDS(&asid_info));
+
+	asid_info.active = &active_asids;
+	asid_info.reserved = &reserved_asids;
+
+	pr_info("ASID allocator initialised with %lu entries\n",
+		NUM_CTXT_ASIDS(&asid_info));
+
+	return 0;
+}
+early_initcall(asids_init);
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
new file mode 100644
index 0000000000..82447029fe
--- /dev/null
+++ b/arch/csky/mm/dma-mapping.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/dma-map-ops.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <asm/cache.h>
+
+static inline void cache_op(phys_addr_t paddr, size_t size,
+			    void (*fn)(unsigned long start, unsigned long end))
+{
+	struct page *page = phys_to_page(paddr);
+	void *start = __va(page_to_phys(page));
+	unsigned long offset = offset_in_page(paddr);
+	size_t left = size;
+
+	do {
+		size_t len = left;
+
+		if (offset + len > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		if (PageHighMem(page)) {
+			start = kmap_atomic(page);
+
+			fn((unsigned long)start + offset,
+			   (unsigned long)start + offset + len);
+
+			kunmap_atomic(start);
+		} else {
+			fn((unsigned long)start + offset,
+			   (unsigned long)start + offset + len);
+		}
+		offset = 0;
+
+		page++;
+		start += PAGE_SIZE;
+		left -= len;
+	} while (left);
+}
+
+static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
+{
+	memset((void *)start, 0, end - start);
+	dma_wbinv_range(start, end);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+			      enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		cache_op(paddr, size, dma_wb_range);
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, dma_wbinv_range);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+			   enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		return;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, dma_inv_range);
+		break;
+	default:
+		BUG();
+	}
+}
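cache_op() above walks a physical range page by page so that highmem pages can be temporarily mapped with kmap_atomic() before the cache routine runs on them. A standalone model of just the chunking arithmetic, assuming 4K pages (the callback and the chunk count are illustrative, not kernel API):

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* csky uses 4K pages */

	/* Split a physical range at page boundaries, as cache_op() does,
	 * so each piece can be mapped and maintained separately.
	 * Returns the number of chunks produced. */
	static int for_each_page_chunk(unsigned long paddr, size_t size,
				       void (*fn)(unsigned long off, size_t len))
	{
		unsigned long offset = paddr & (PAGE_SIZE - 1);
		size_t left = size;
		int chunks = 0;

		do {
			size_t len = left;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			if (fn)
				fn(offset, len);
			chunks++;
			offset = 0;	/* later pages start at offset 0 */
			left -= len;
		} while (left);

		return chunks;
	}

	int main(void)
	{
		/* 6000 bytes starting 100 bytes into a page: 3996 + 2004. */
		assert(for_each_page_chunk(0x1000 + 100, 6000, NULL) == 2);
		/* One aligned page is a single chunk. */
		assert(for_each_page_chunk(0x2000, PAGE_SIZE, NULL) == 1);
		printf("ok\n");
		return 0;
	}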
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
new file mode 100644
index 0000000000..a885518ce1
--- /dev/null
+++ b/arch/csky/mm/fault.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/extable.h>
+#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
+#include <linux/perf_event.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_exception_tables(instruction_pointer(regs));
+	if (fixup) {
+		regs->pc = fixup->fixup;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline bool is_write(struct pt_regs *regs)
+{
+	switch (trap_no(regs)) {
+	case VEC_TLBINVALIDS:
+		return true;
+	case VEC_TLBMODIFIED:
+		return true;
+	}
+
+	return false;
+}
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+	return;
+}
+#else
+extern unsigned long csky_cmpxchg_ldw;
+extern unsigned long csky_cmpxchg_stw;
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+	if (trap_no(regs) != VEC_TLBMODIFIED)
+		return;
+
+	if (instruction_pointer(regs) == csky_cmpxchg_stw)
+		instruction_pointer_set(regs, csky_cmpxchg_ldw);
+	return;
+}
+#endif
+
+static inline void no_context(struct pt_regs *regs, unsigned long addr)
+{
+	current->thread.trap_no = trap_no(regs);
+
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs))
+		return;
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	bust_spinlocks(1);
+	pr_alert("Unable to handle kernel paging request at virtual "
+		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
+	die(regs, "Oops");
+	make_task_dead(SIGKILL);
+}
+
+static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+{
+	current->thread.trap_no = trap_no(regs);
+
+	if (fault & VM_FAULT_OOM) {
+		/*
+		 * We ran out of memory: call the OOM killer and return to
+		 * userspace (which will retry the fault, or kill us if we
+		 * got oom-killed).
+		 */
+		if (!user_mode(regs)) {
+			no_context(regs, addr);
+			return;
+		}
+		pagefault_out_of_memory();
+		return;
+	} else if (fault & VM_FAULT_SIGBUS) {
+		/* Kernel mode? Handle exceptions or die */
+		if (!user_mode(regs)) {
+			no_context(regs, addr);
+			return;
+		}
+		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+		return;
+	}
+	BUG();
+}
+
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+{
+	/*
+	 * Something tried to access memory that isn't in our memory map.
+	 * Fix it, but check if it's kernel or user first.
+	 */
+	/* User mode accesses just cause a SIGSEGV */
+	if (user_mode(regs)) {
+		do_trap(regs, SIGSEGV, code, addr);
+		return;
+	}
+
+	no_context(regs, addr);
+}
+
+static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
+{
+	pgd_t *pgd, *pgd_k;
+	pud_t *pud, *pud_k;
+	pmd_t *pmd, *pmd_k;
+	pte_t *pte_k;
+	int offset;
+
+	/* User mode accesses just cause a SIGSEGV */
+	if (user_mode(regs)) {
+		do_trap(regs, SIGSEGV, code, addr);
+		return;
+	}
+
+	/*
+	 * Synchronize this task's top level page-table
+	 * with the 'reference' page table.
+	 *
+	 * Do _not_ use "tsk" here. We might be inside
+	 * an interrupt in the middle of a task switch..
+ */ + offset = pgd_index(addr); + + pgd = get_pgd() + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) { + no_context(regs, addr); + return; + } + set_pgd(pgd, *pgd_k); + + pud = (pud_t *)pgd; + pud_k = (pud_t *)pgd_k; + if (!pud_present(*pud_k)) { + no_context(regs, addr); + return; + } + + pmd = pmd_offset(pud, addr); + pmd_k = pmd_offset(pud_k, addr); + if (!pmd_present(*pmd_k)) { + no_context(regs, addr); + return; + } + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, addr); + if (!pte_present(*pte_k)) { + no_context(regs, addr); + return; + } + + flush_tlb_one(addr); +} + +static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma) +{ + if (is_write(regs)) { + if (!(vma->vm_flags & VM_WRITE)) + return true; + } else { + if (unlikely(!vma_is_accessible(vma))) + return true; + } + return false; +} + +/* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. + */ +asmlinkage void do_page_fault(struct pt_regs *regs) +{ + struct task_struct *tsk; + struct vm_area_struct *vma; + struct mm_struct *mm; + unsigned long addr = read_mmu_entryhi() & PAGE_MASK; + unsigned int flags = FAULT_FLAG_DEFAULT; + int code = SEGV_MAPERR; + vm_fault_t fault; + + tsk = current; + mm = tsk->mm; + + csky_cmpxchg_fixup(regs); + + if (kprobe_page_fault(regs, tsk->thread.trap_no)) + return; + + /* + * Fault-in kernel-space virtual memory on-demand. + * The 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) { + vmalloc_fault(regs, code, addr); + return; + } + + /* Enable interrupts if they were enabled in the parent context. */ + if (likely(regs->sr & BIT(6))) + local_irq_enable(); + + /* + * If we're in an interrupt, have no user context, or are running + * in an atomic region, then we must not take the fault. + */ + if (unlikely(faulthandler_disabled() || !mm)) { + no_context(regs, addr); + return; + } + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + + if (is_write(regs)) + flags |= FAULT_FLAG_WRITE; +retry: + vma = lock_mm_and_find_vma(mm, addr, regs); + if (unlikely(!vma)) { + bad_area_nosemaphore(regs, mm, code, addr); + return; + } + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it. + */ + code = SEGV_ACCERR; + + if (unlikely(access_error(regs, vma))) { + mmap_read_unlock(mm); + bad_area_nosemaphore(regs, mm, code, addr); + return; + } + + /* + * If for any reason at all we could not handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, addr, flags, regs); + + /* + * If we need to retry but a fatal signal is pending, handle the + * signal first. We do not need to release the mmap_lock because it + * would already be released in __lock_page_or_retry in mm/filemap.c. 
+	 */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, addr);
+		return;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
+	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+		flags |= FAULT_FLAG_TRIED;
+
+		/*
+		 * No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
+		goto retry;
+	}
+
+	mmap_read_unlock(mm);
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		mm_fault_error(regs, addr, fault);
+		return;
+	}
+	return;
+}
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
new file mode 100644
index 0000000000..4161df3c6c
--- /dev/null
+++ b/arch/csky/mm/highmem.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/smp.h>
+#include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+unsigned long highstart_pfn, highend_pfn;
+
+void kmap_flush_tlb(unsigned long addr)
+{
+	flush_tlb_one(addr);
+}
+EXPORT_SYMBOL(kmap_flush_tlb);
+
+void __init kmap_init(void)
+{
+	unsigned long vaddr;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pud_t *pud;
+	pte_t *pte;
+
+	vaddr = PKMAP_BASE;
+	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
+
+	pgd = swapper_pg_dir + pgd_index(vaddr);
+	pud = (pud_t *)pgd;
+	pmd = pmd_offset(pud, vaddr);
+	pte = pte_offset_kernel(pmd, vaddr);
+	pkmap_page_table = pte;
+}
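kmap_init() pre-builds page tables for the persistent-kmap window, so later kmap() calls only have to install PTEs into pkmap_page_table. A small sketch of the window arithmetic; the PKMAP_BASE and LAST_PKMAP values here are hypothetical stand-ins for the ones defined in the csky headers:

	#include <stdio.h>

	/* Assumed values for illustration; the real ones come from
	 * asm/highmem.h and asm/fixmap.h. */
	#define PAGE_SIZE	4096UL
	#define LAST_PKMAP	1024UL		/* number of kmap slots */
	#define PKMAP_BASE	0xff800000UL	/* hypothetical base address */

	int main(void)
	{
		/* kmap_init() maps page tables for exactly this window: */
		unsigned long start = PKMAP_BASE;
		unsigned long end = PKMAP_BASE + PAGE_SIZE * LAST_PKMAP;

		printf("PKMAP window: 0x%08lx-0x%08lx (%lu KiB, %lu slots)\n",
		       start, end, (end - start) / 1024, LAST_PKMAP);
		return 0;
	}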
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
new file mode 100644
index 0000000000..bde7cabd23
--- /dev/null
+++ b/arch/csky/mm/init.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/initrd.h>
+
+#include <asm/setup.h>
+#include <asm/cachectl.h>
+#include <asm/dma.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+#include <asm/cacheflush.h>
+
+#define PTRS_KERN_TABLE \
+		((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;
+
+EXPORT_SYMBOL(invalid_pte_table);
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+						__page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+	unsigned long size;
+
+	if (initrd_start >= initrd_end) {
+		pr_err("initrd not found or empty");
+		goto disable;
+	}
+
+	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+		pr_err("initrd extends beyond end of memory");
+		goto disable;
+	}
+
+	size = initrd_end - initrd_start;
+
+	if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+		       __pa(initrd_start), size);
+		goto disable;
+	}
+
+	memblock_reserve(__pa(initrd_start), size);
+
+	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+		(void *)(initrd_start), size);
+
+	initrd_below_start_ok = 1;
+
+	return;
+
+disable:
+	initrd_start = initrd_end = 0;
+
+	pr_err(" - disabling initrd\n");
+}
+#endif
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long tmp;
+
+	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
+#else
+	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
+#endif
+	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	setup_initrd();
+#endif
+
+	memblock_free_all();
+
+#ifdef CONFIG_HIGHMEM
+	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+		struct page *page = pfn_to_page(tmp);
+
+		/* FIXME: not sure about this */
+		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
+			free_highmem_page(page);
+	}
+#endif
+}
+
+void free_initmem(void)
+{
+	free_initmem_default(-1);
+}
+
+void pgd_init(unsigned long *p)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		p[i] = __pa(invalid_pte_table);
+
+	flush_tlb_all();
+	local_icache_inv_all(NULL);
+}
+
+void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
+{
+	int i;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		swapper_pg_dir[i].pgd = __pa(invalid_pte_table);
+
+	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
+		swapper_pg_dir[i].pgd =
+			__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));
+
+	for (i = 0; i < PTRS_KERN_TABLE; i++)
+		set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL));
+
+	for (i = min_pfn; i < max_pfn; i++)
+		set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));
+
+	flush_tlb_all();
+	local_icache_inv_all(NULL);
+
+	/* Set up the page mask for 4K pages */
+	write_mmu_pagemask(0);
+
+	setup_pgd(swapper_pg_dir, 0);
+}
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+		pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	int i, j, k;
+	unsigned long vaddr;
+
+	vaddr = start;
+	i = pgd_index(vaddr);
+	j = pud_index(vaddr);
+	k = pmd_index(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+		pud = (pud_t *)pgd;
+		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+			pmd = (pmd_t *)pud;
+			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+				if (pmd_none(*pmd)) {
+					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+					if (!pte)
+						panic("%s: Failed to allocate %lu bytes align=%lx\n",
+						      __func__, PAGE_SIZE,
+						      PAGE_SIZE);
+
+					set_pmd(pmd, __pmd(__pa(pte)));
+					BUG_ON(pte != pte_offset_kernel(pmd, 0));
+				}
+				vaddr += PMD_SIZE;
+			}
+			k = 0;
+		}
+		j = 0;
+	}
+}
+
+void __init fixaddr_init(void)
+{
+	unsigned long vaddr;
+
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
+}
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READ,
+	[VM_WRITE]					= PAGE_READ,
+	[VM_WRITE | VM_READ]				= PAGE_READ,
+	[VM_EXEC]					= PAGE_READ,
+	[VM_EXEC | VM_READ]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READ,
+	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC]				= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
+};
DECLARE_VM_GET_PAGE_PROT
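The protection_map above encodes the usual copy-on-write policy: a private writable mapping starts out with read-only PTEs, and only shared writable mappings get PAGE_WRITE up front; the first store to a private page then traps (VEC_TLBMODIFIED) and is resolved by COW in the fault handler. A self-contained model of the lookup, using illustrative flag values rather than the kernel's:

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative bit layout mirroring how VM_READ/WRITE/EXEC/SHARED
	 * index a 16-entry protection map. */
	enum { VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8 };
	enum prot { PAGE_NONE, PAGE_READ, PAGE_WRITE };

	static const enum prot protection_map[16] = {
		[0]                              = PAGE_NONE,
		[VM_READ]                        = PAGE_READ,
		[VM_WRITE]                       = PAGE_READ,	/* private: COW */
		[VM_WRITE | VM_READ]             = PAGE_READ,	/* private: COW */
		[VM_EXEC]                        = PAGE_READ,
		[VM_EXEC | VM_READ]              = PAGE_READ,
		[VM_EXEC | VM_WRITE]             = PAGE_READ,
		[VM_EXEC | VM_WRITE | VM_READ]   = PAGE_READ,
		[VM_SHARED]                      = PAGE_NONE,
		[VM_SHARED | VM_READ]            = PAGE_READ,
		[VM_SHARED | VM_WRITE]           = PAGE_WRITE,
		[VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE,
		[VM_SHARED | VM_EXEC]            = PAGE_READ,
		[VM_SHARED | VM_EXEC | VM_READ]  = PAGE_READ,
		[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE,
		[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE,
	};

	int main(void)
	{
		/* Private writable mappings start read-only (COW on fault). */
		assert(protection_map[VM_WRITE | VM_READ] == PAGE_READ);
		/* Only shared writable mappings get writable PTEs up front. */
		assert(protection_map[VM_SHARED | VM_WRITE | VM_READ] == PAGE_WRITE);
		printf("ok\n");
		return 0;
	}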
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
new file mode 100644
index 0000000000..70c8268d3b
--- /dev/null
+++ b/arch/csky/mm/ioremap.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn)) {
+		return pgprot_noncached(vma_prot);
+	} else if (file->f_flags & O_SYNC) {
+		return pgprot_writecombine(vma_prot);
+	}
+
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
new file mode 100644
index 0000000000..cd847ad62c
--- /dev/null
+++ b/arch/csky/mm/syscache.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/syscalls.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+
+SYSCALL_DEFINE3(cacheflush,
+		void __user *, addr,
+		unsigned long, bytes,
+		int, cache)
+{
+	switch (cache) {
+	case BCACHE:
+	case DCACHE:
+		dcache_wb_range((unsigned long)addr,
+				(unsigned long)addr + bytes);
+		if (cache != BCACHE)
+			break;
+		fallthrough;
+	case ICACHE:
+		flush_icache_mm_range(current->mm,
+				(unsigned long)addr,
+				(unsigned long)addr + bytes);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
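sys_cacheflush() is the call a user-space JIT makes after writing instructions: BCACHE writes the D-cache back and, via the fallthrough into the ICACHE case, invalidates the I-cache over the same range. A hedged sketch of a caller; the ICACHE/DCACHE/BCACHE values are assumed to mirror the uapi header added by this patch (arch/csky/include/uapi/asm/cachectl.h), and flush_code_range() is a hypothetical helper:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Assumed to match arch/csky/include/uapi/asm/cachectl.h. */
	#define ICACHE	(1 << 0)
	#define DCACHE	(1 << 1)
	#define BCACHE	(ICACHE | DCACHE)

	/* Hypothetical JIT helper: after writing instructions into buf,
	 * push them out of the D-cache and drop stale I-cache lines
	 * before executing. __NR_cacheflush only exists on some arches. */
	static long flush_code_range(void *buf, unsigned long len)
	{
	#ifdef __NR_cacheflush
		return syscall(__NR_cacheflush, buf, len, BCACHE);
	#else
		(void)buf; (void)len;
		return -1;	/* not a csky build */
	#endif
	}

	int main(void)
	{
		char code[64] = { 0 };

		printf("cacheflush returned %ld\n",
		       flush_code_range(code, sizeof(code)));
		return 0;
	}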
diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c
new file mode 100644
index 0000000000..ddeb363288
--- /dev/null
+++ b/arch/csky/mm/tcm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/genalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
+#error "You should define ITCM_RAM_BASE"
+#endif
+
+#ifdef CONFIG_HAVE_DTCM
+#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
+#error "You should define DTCM_RAM_BASE"
+#endif
+
+#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
+#error "You should define correct DTCM_RAM_BASE"
+#endif
+#endif
+
+extern char __tcm_start, __tcm_end, __dtcm_start;
+
+static struct gen_pool *tcm_pool;
+
+static void __init tcm_mapping_init(void)
+{
+	pte_t *tcm_pte;
+	unsigned long vaddr, paddr;
+	int i;
+
+	paddr = CONFIG_ITCM_RAM_BASE;
+
+	if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
+		goto panic;
+
+#ifndef CONFIG_HAVE_DTCM
+	for (i = 0; i < TCM_NR_PAGES; i++) {
+#else
+	for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
+#endif
+		vaddr = __fix_to_virt(FIX_TCM - i);
+
+		tcm_pte =
+			pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+
+		set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+		flush_tlb_one(vaddr);
+
+		paddr = paddr + PAGE_SIZE;
+	}
+
+#ifdef CONFIG_HAVE_DTCM
+	if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
+		goto panic;
+
+	paddr = CONFIG_DTCM_RAM_BASE;
+
+	for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
+		vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
+
+		tcm_pte =
+			pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
+
+		set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+		flush_tlb_one(vaddr);
+
+		paddr = paddr + PAGE_SIZE;
+	}
+#endif
+
+#ifndef CONFIG_HAVE_DTCM
+	memcpy((void *)__fix_to_virt(FIX_TCM),
+		&__tcm_start, &__tcm_end - &__tcm_start);
+
+	pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
+		__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+	pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
+		__func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
+#else
+	memcpy((void *)__fix_to_virt(FIX_TCM),
+		&__tcm_start, &__dtcm_start - &__tcm_start);
+
+	pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
+		__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+	pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
+		__func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
+
+	memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+		&__dtcm_start, &__tcm_end - &__dtcm_start);
+
+	pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
+		__func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+		CONFIG_DTCM_RAM_BASE);
+
+	pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
+		__func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
+
+#endif
+	return;
+panic:
+	panic("TCM init error");
+}
+
+void *tcm_alloc(size_t len)
+{
+	unsigned long vaddr;
+
+	if (!tcm_pool)
+		return NULL;
+
+	vaddr = gen_pool_alloc(tcm_pool, len);
+	if (!vaddr)
+		return NULL;
+
+	return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+void tcm_free(void *addr, size_t len)
+{
+	gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+static int __init tcm_setup_pool(void)
+{
+#ifndef CONFIG_HAVE_DTCM
+	u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
+				- (u32) (&__tcm_end - &__tcm_start);
+
+	u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+				+ (u32) (&__tcm_end - &__tcm_start);
+#else
+	u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
+				- (u32) (&__tcm_end - &__dtcm_start);
+
+	u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+				+ (u32) (&__tcm_end - &__dtcm_start);
+#endif
+	int ret;
+
+	tcm_pool = gen_pool_create(2, -1);
+
+	ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
+	if (ret) {
+		pr_err("%s: gen_pool add failed!\n", __func__);
+		return ret;
+	}
+
+	pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
+		__func__, pool_size, tcm_pool_start);
+
+	return 0;
+}
+
+static int __init tcm_init(void)
+{
+	tcm_mapping_init();
+
+	tcm_setup_pool();
+
+	return 0;
+}
arch_initcall(tcm_init);
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
new file mode 100644
index 0000000000..9234c5e5ce
--- /dev/null
+++ b/arch/csky/mm/tlb.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+
+/*
+ * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
+ * 1 VPN -> 2 PFNs
+ */
+#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
+
+void flush_tlb_all(void)
+{
+	tlb_invalid_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+	sync_is();
+	asm volatile(
+		"tlbi.asids %0	\n"
+		"sync.i		\n"
+		:
+		: "r" (cpu_asid(mm))
+		: "memory");
+#else
+	tlb_invalid_all();
+#endif
+}
+
+/*
+ * The MMU operation registers can only invalidate TLB entries in the jTLB;
+ * to also invalidate the I-uTLB & D-uTLB, we need to change the ASID field.
+ */ +#ifndef CONFIG_CPU_HAS_TLBI +#define restore_asid_inv_utlb(oldpid, newpid) \ +do { \ + if (oldpid == newpid) \ + write_mmu_entryhi(oldpid + 1); \ + write_mmu_entryhi(oldpid); \ +} while (0) +#endif + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + unsigned long newpid = cpu_asid(vma->vm_mm); + + start &= TLB_ENTRY_SIZE_MASK; + end += TLB_ENTRY_SIZE - 1; + end &= TLB_ENTRY_SIZE_MASK; + +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + while (start < end) { + asm volatile( + "tlbi.vas %0 \n" + : + : "r" (start | newpid) + : "memory"); + + start += 2*PAGE_SIZE; + } + asm volatile("sync.i\n"); +#else + { + unsigned long flags, oldpid; + + local_irq_save(flags); + oldpid = read_mmu_entryhi() & ASID_MASK; + while (start < end) { + int idx; + + write_mmu_entryhi(start | newpid); + start += 2*PAGE_SIZE; + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + } + restore_asid_inv_utlb(oldpid, newpid); + local_irq_restore(flags); + } +#endif +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + start &= TLB_ENTRY_SIZE_MASK; + end += TLB_ENTRY_SIZE - 1; + end &= TLB_ENTRY_SIZE_MASK; + +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + while (start < end) { + asm volatile( + "tlbi.vaas %0 \n" + : + : "r" (start) + : "memory"); + + start += 2*PAGE_SIZE; + } + asm volatile("sync.i\n"); +#else + { + unsigned long flags, oldpid; + + local_irq_save(flags); + oldpid = read_mmu_entryhi() & ASID_MASK; + while (start < end) { + int idx; + + write_mmu_entryhi(start | oldpid); + start += 2*PAGE_SIZE; + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + } + restore_asid_inv_utlb(oldpid, oldpid); + local_irq_restore(flags); + } +#endif +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + int newpid = cpu_asid(vma->vm_mm); + + addr &= TLB_ENTRY_SIZE_MASK; + +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.vas %0 \n" + "sync.i \n" + : + : "r" (addr | newpid) + : "memory"); +#else + { + int oldpid, idx; + unsigned long flags; + + local_irq_save(flags); + oldpid = read_mmu_entryhi() & ASID_MASK; + write_mmu_entryhi(addr | newpid); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + + restore_asid_inv_utlb(oldpid, newpid); + local_irq_restore(flags); + } +#endif +} + +void flush_tlb_one(unsigned long addr) +{ + addr &= TLB_ENTRY_SIZE_MASK; + +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.vaas %0 \n" + "sync.i \n" + : + : "r" (addr) + : "memory"); +#else + { + int oldpid, idx; + unsigned long flags; + + local_irq_save(flags); + oldpid = read_mmu_entryhi() & ASID_MASK; + write_mmu_entryhi(addr | oldpid); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + + restore_asid_inv_utlb(oldpid, oldpid); + local_irq_restore(flags); + } +#endif +} +EXPORT_SYMBOL(flush_tlb_one); -- cgit v1.2.3
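As a closing aside on the tlb.c hunk: the rounding done by flush_tlb_range() and flush_tlb_kernel_range() can be checked in isolation. Because one jTLB entry maps a pair of pages, both ends of the range are aligned to 2*PAGE_SIZE and the invalidate loop steps two pages at a time. A minimal sketch of that arithmetic, assuming 4K pages:

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define PAGE_MASK		(~(PAGE_SIZE - 1))
	#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
	#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

	int main(void)
	{
		unsigned long start = 0x12345678UL;
		unsigned long end = 0x12347000UL;
		unsigned long n = 0;

		/* Round outward to 8K entry boundaries, as the kernel does. */
		start &= TLB_ENTRY_SIZE_MASK;
		end += TLB_ENTRY_SIZE - 1;
		end &= TLB_ENTRY_SIZE_MASK;

		assert(start == 0x12344000UL);
		assert(end == 0x12348000UL);

		/* The invalidate loop visits one two-page entry per step. */
		for (; start < end; start += 2 * PAGE_SIZE)
			n++;
		assert(n == 2);

		printf("ok\n");
		return 0;
	}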