commit 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /arch/loongarch
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/loongarch')
203 files changed, 25183 insertions, 0 deletions
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
new file mode 100644
index 000000000..b01f5cdb2
--- /dev/null
+++ b/arch/loongarch/Kbuild
@@ -0,0 +1,7 @@
+obj-y += kernel/
+obj-y += mm/
+obj-y += net/
+obj-y += vdso/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
new file mode 100644
index 000000000..e737dc8cd
--- /dev/null
+++ b/arch/loongarch/Kconfig
@@ -0,0 +1,522 @@
+# SPDX-License-Identifier: GPL-2.0
+config LOONGARCH
+	bool
+	default y
+	select ACPI
+	select ACPI_GENERIC_GSI if ACPI
+	select ACPI_MCFG if ACPI
+	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+	select ARCH_BINFMT_ELF_STATE
+	select ARCH_ENABLE_MEMORY_HOTPLUG
+	select ARCH_ENABLE_MEMORY_HOTREMOVE
+	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_KEEP_MEMBLOCK
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_STACKWALK
+	select ARCH_SUPPORTS_ACPI
+	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_HUGETLBFS
+	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_USE_BUILTIN_BSWAP
+	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+	select ARCH_WANT_LD_ORPHAN_WARN
+	select ARCH_WANTS_NO_INSTR
+	select BUILDTIME_TABLE_SORT
+	select COMMON_CLK
+	select EFI
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_CMOS_UPDATE
+	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_ENTRY
+	select GENERIC_GETTIMEOFDAY
+	select GENERIC_IOREMAP if !ARCH_IOREMAP
+	select GENERIC_IRQ_MULTI_HANDLER
+	select GENERIC_IRQ_PROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_LIB_ASHLDI3
+	select GENERIC_LIB_ASHRDI3
+	select GENERIC_LIB_CMPDI2
+	select GENERIC_LIB_LSHRDI3
+	select GENERIC_LIB_UCMPDI2
+	select GENERIC_LIB_DEVMEM_IS_ALLOWED
+	select GENERIC_PCI_IOMAP
+	select GENERIC_SCHED_CLOCK
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_TIME_VSYSCALL
+	select GPIOLIB
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_MMAP_RND_BITS if MMU
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ASM_MODVERSIONS
+	select HAVE_CONTEXT_TRACKING_USER
+	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_DMA_CONTIGUOUS
+	select HAVE_EBPF_JIT
+	select HAVE_EXIT_THREAD
+	select HAVE_FAST_GUP
+	select HAVE_GENERIC_VDSO
+	select HAVE_IOREMAP_PROT
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_NMI
+	select HAVE_PCI
+	select HAVE_PERF_EVENTS
+	select HAVE_PERF_REGS
+	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_RSEQ
+	select HAVE_SETUP_PER_CPU_AREA if NUMA
+	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_TIF_NOHZ
+	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+	select IRQ_FORCED_THREADING
+	select IRQ_LOONGARCH_CPU
+	select LOCK_MM_AND_FIND_VMA
+	select MMU_GATHER_MERGE_VMAS if MMU
+	select MODULES_USE_ELF_RELA if MODULES
+	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+	select NEED_PER_CPU_PAGE_FIRST_CHUNK
+	select PCI
+	select PCI_DOMAINS_GENERIC
+	select PCI_ECAM if ACPI
+	select PCI_LOONGSON
+	select PCI_MSI_ARCH_FALLBACKS
+	select PCI_QUIRKS
+	select PERF_USE_VMALLOC
+	select RTC_LIB
+	select SMP
+	select SPARSE_IRQ
+	select SYSCTL_EXCEPTION_TRACE
+	select SWIOTLB
+	select TRACE_IRQFLAGS_SUPPORT
+	select USE_PERCPU_NUMA_NODE_ID
+	select USER_STACKTRACE_SUPPORT
+	select ZONE_DMA32
+
+config 32BIT
+	bool
+
+config 64BIT
+	def_bool y
+
+config CPU_HAS_FPU
+	bool
+	default y
+
+config CPU_HAS_PREFETCH
+	bool
+	default y
+
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	def_bool y
+	depends on GENERIC_BUG
+
+config GENERIC_CALIBRATE_DELAY
+	def_bool y
+
+config GENERIC_CSUM
+	def_bool y
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config L1_CACHE_SHIFT
+	int
+	default "6"
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
+# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
+# are shared between architectures and specifically expect these symbols.
+config MACH_LOONGSON32
+	def_bool 32BIT
+
+config MACH_LOONGSON64
+	def_bool 64BIT
+
+config FIX_EARLYCON_MEM
+	def_bool y
+
+config PAGE_SIZE_4KB
+	bool
+
+config PAGE_SIZE_16KB
+	bool
+
+config PAGE_SIZE_64KB
+	bool
+
+config PGTABLE_2LEVEL
+	bool
+
+config PGTABLE_3LEVEL
+	bool
+
+config PGTABLE_4LEVEL
+	bool
+
+config PGTABLE_LEVELS
+	int
+	default 2 if PGTABLE_2LEVEL
+	default 3 if PGTABLE_3LEVEL
+	default 4 if PGTABLE_4LEVEL
+
+config SCHED_OMIT_FRAME_POINTER
+	bool
+	default y
+
+config AS_HAS_EXPLICIT_RELOCS
+	def_bool $(as-instr,x:pcalau12i \$t0$(comma)%pc_hi20(x))
+
+menu "Kernel type and options"
+
+source "kernel/Kconfig.hz"
+
+choice
+	prompt "Page Table Layout"
+	default 16KB_2LEVEL if 32BIT
+	default 16KB_3LEVEL if 64BIT
+	help
+	  Allows choosing the page table layout, which is a combination
+	  of page size and page table levels. The size of the virtual
+	  memory address space is determined by the page table layout.
+
+config 4KB_3LEVEL
+	bool "4KB with 3 levels"
+	select PAGE_SIZE_4KB
+	select PGTABLE_3LEVEL
+	help
+	  This option selects 4KB page size with 3 level page tables, which
+	  supports a maximum of 39 bits of application virtual memory.
+
+config 4KB_4LEVEL
+	bool "4KB with 4 levels"
+	select PAGE_SIZE_4KB
+	select PGTABLE_4LEVEL
+	help
+	  This option selects 4KB page size with 4 level page tables, which
+	  supports a maximum of 48 bits of application virtual memory.
+
+config 16KB_2LEVEL
+	bool "16KB with 2 levels"
+	select PAGE_SIZE_16KB
+	select PGTABLE_2LEVEL
+	help
+	  This option selects 16KB page size with 2 level page tables, which
+	  supports a maximum of 36 bits of application virtual memory.
+
+config 16KB_3LEVEL
+	bool "16KB with 3 levels"
+	select PAGE_SIZE_16KB
+	select PGTABLE_3LEVEL
+	help
+	  This option selects 16KB page size with 3 level page tables, which
+	  supports a maximum of 47 bits of application virtual memory.
+
+config 64KB_2LEVEL
+	bool "64KB with 2 levels"
+	select PAGE_SIZE_64KB
+	select PGTABLE_2LEVEL
+	help
+	  This option selects 64KB page size with 2 level page tables, which
+	  supports a maximum of 42 bits of application virtual memory.
+
+config 64KB_3LEVEL
+	bool "64KB with 3 levels"
+	select PAGE_SIZE_64KB
+	select PGTABLE_3LEVEL
+	help
+	  This option selects 64KB page size with 3 level page tables, which
+	  supports a maximum of 55 bits of application virtual memory.
+
+endchoice
+
+config CMDLINE
+	string "Built-in kernel command line"
+	help
+	  For most platforms, the arguments for the kernel's command line
+	  are provided at run-time, during boot. However, there are cases
+	  where either no arguments are being provided or the provided
+	  arguments are insufficient or even invalid.
+
+	  When that occurs, it is possible to define a built-in command
+	  line here and choose how the kernel should use it later on.
+
+choice
+	prompt "Kernel command line type"
+	default CMDLINE_BOOTLOADER
+	help
+	  Choose how the kernel will handle the provided built-in command
+	  line.
+
+config CMDLINE_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Prefer the command-line passed by the boot loader if available.
+	  Use the built-in command line as fallback in case we get nothing
+	  during boot. This is the default behaviour.
+
+config CMDLINE_EXTEND
+	bool "Use built-in to extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided during boot will be
+	  appended to the built-in command line. This is useful in
+	  cases where the provided arguments are insufficient and
+	  you don't want to or cannot modify them.
+
+config CMDLINE_FORCE
+	bool "Always use the built-in kernel command string"
+	help
+	  Always use the built-in command line, even if we get one during
+	  boot. This is useful in case you need to override the provided
+	  command line on systems where you don't have or want control
+	  over it.
+
+endchoice
+
+config DMI
+	bool "Enable DMI scanning"
+	select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+	default y
+	help
+	  This enables the SMBIOS/DMI feature for systems, and scanning of
+	  DMI to identify machine quirks.
+
+config EFI
+	bool "EFI runtime service support"
+	select UCS2_STRING
+	select EFI_RUNTIME_WRAPPERS
+	help
+	  This enables the kernel to use EFI runtime services that are
+	  available (such as the EFI variable services).
+
+config EFI_STUB
+	bool "EFI boot stub support"
+	default y
+	depends on EFI
+	select EFI_GENERIC_STUB
+	help
+	  This kernel feature allows the kernel to be loaded directly by
+	  EFI firmware without the use of a bootloader.
+
+config SMP
+	bool "Multi-Processing support"
+	help
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more
+	  than one CPU, say Y.
+
+	  If you say N here, the kernel will run on uni- and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  uniprocessor machines. On a uniprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	select GENERIC_IRQ_MIGRATION
+	help
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+	  (Note: power management support will enable this option
+	  automatically on SMP systems.)
+	  Say N if you want to disable CPU hotplug.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-256)"
+	range 2 256
+	depends on SMP
+	default "64"
+	help
+	  This allows you to specify the maximum number of CPUs which this
+	  kernel will support.
+
+config NUMA
+	bool "NUMA Support"
+	select SMP
+	select ACPI_NUMA if ACPI
+	help
+	  Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
+	  support. This option improves performance on systems with more
+	  than one NUMA node; on single node systems it is generally better
+	  to leave it disabled.
+
+config NODES_SHIFT
+	int
+	default "6"
+	depends on NUMA
+
+config ARCH_FORCE_MAX_ORDER
+	int "Maximum zone order"
+	range 14 64 if PAGE_SIZE_64KB
+	default "14" if PAGE_SIZE_64KB
+	range 12 64 if PAGE_SIZE_16KB
+	default "12" if PAGE_SIZE_16KB
+	range 11 64
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages. This option selects the largest power of two that the kernel
+	  keeps in the memory allocator. If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  The page size is not necessarily 4KB. Keep this in mind
+	  when choosing a value for this option.
+
+config ARCH_IOREMAP
+	bool "Enable LoongArch DMW-based ioremap()"
+	help
+	  We use generic TLB-based ioremap() by default since it has page
+	  protection support. However, you can enable LoongArch DMW-based
+	  ioremap() for better performance.
+
+config KEXEC
+	bool "Kexec system call"
+	select KEXEC_CORE
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is independent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+config CRASH_DUMP
+	bool "Build kdump crash kernel"
+	help
+	  Generate a crash dump after being started by kexec. This should
+	  normally only be set in special crash dump kernels which are
+	  loaded in the main kernel with kexec-tools into a specially
+	  reserved region and then later executed after a crash by
+	  kdump/kexec.
+
+	  For more details see Documentation/admin-guide/kdump/kdump.rst
+
+config PHYSICAL_START
+	hex "Physical address where the kernel is loaded"
+	default "0x90000000a0000000"
+	depends on CRASH_DUMP
+	help
+	  This gives the XKPRANGE address where the kernel is loaded.
+	  If you plan to use the kernel for capturing a crash dump, change
+	  this value to the start of the reserved region (the "X" value as
+	  specified in the "crashkernel=YM@XM" command line boot parameter
+	  passed to the panicked kernel).
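The address widths quoted in the page-table-layout help texts above follow from one relation: page-table entries are 8 bytes, so a page of 2^PAGE_SHIFT bytes holds 2^(PAGE_SHIFT - 3) entries and each level decodes PAGE_SHIFT - 3 further bits on top of the in-page offset. A minimal sketch of that arithmetic, assuming nothing beyond the numbers in the help texts:

#include <stdio.h>

/* VA bits = PAGE_SHIFT + levels * (PAGE_SHIFT - 3): the offset bits plus
 * the bits decoded by each page-table level (8-byte PTEs per page).
 */
static int va_bits(int page_shift, int levels)
{
	return page_shift + levels * (page_shift - 3);
}

int main(void)
{
	printf("4KB/3  -> %d bits\n", va_bits(12, 3)); /* 39, as in 4KB_3LEVEL  */
	printf("4KB/4  -> %d bits\n", va_bits(12, 4)); /* 48, as in 4KB_4LEVEL  */
	printf("16KB/2 -> %d bits\n", va_bits(14, 2)); /* 36, as in 16KB_2LEVEL */
	printf("16KB/3 -> %d bits\n", va_bits(14, 3)); /* 47, as in 16KB_3LEVEL */
	printf("64KB/2 -> %d bits\n", va_bits(16, 2)); /* 42, as in 64KB_2LEVEL */
	printf("64KB/3 -> %d bits\n", va_bits(16, 3)); /* 55, as in 64KB_3LEVEL */
	return 0;
}

This is also why the 16KB default reaches 47 bits with one level fewer than the four levels 4KB pages need for 48.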
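Similarly, the ARCH_FORCE_MAX_ORDER defaults above scale with the page size; since the option is "maximum order plus one", a value of N caps the buddy allocator at blocks of 2^(N-1) pages. A quick sketch of the resulting maximum block sizes, using only the defaults quoted in the entry:

#include <stdio.h>

/* Largest buddy block = PAGE_SIZE << (ARCH_FORCE_MAX_ORDER - 1). */
static unsigned long max_block_bytes(unsigned long page_size, int order)
{
	return page_size << (order - 1);
}

int main(void)
{
	printf("4KB  pages, order 11 -> %lu MiB\n", max_block_bytes(4096, 11)  >> 20); /*   4 MiB */
	printf("16KB pages, order 12 -> %lu MiB\n", max_block_bytes(16384, 12) >> 20); /*  32 MiB */
	printf("64KB pages, order 14 -> %lu MiB\n", max_block_bytes(65536, 14) >> 20); /* 512 MiB */
	return 0;
}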
+
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	default y
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y. Only embedded systems should say N here.
+
+endmenu
+
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+	depends on !NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	help
+	  Say Y to support efficient handling of sparse physical memory,
+	  for architectures which are either NUMA (Non-Uniform Memory Access)
+	  or have huge holes in the physical address space for other reasons.
+	  See <file:Documentation/mm/numa.rst> for more.
+
+config ARCH_ENABLE_THP_MIGRATION
+	def_bool y
+	depends on TRANSPARENT_HUGEPAGE
+
+config ARCH_MEMORY_PROBE
+	def_bool y
+	depends on MEMORY_HOTPLUG
+
+config MMU
+	bool
+	default y
+
+config ARCH_MMAP_RND_BITS_MIN
+	default 12
+
+config ARCH_MMAP_RND_BITS_MAX
+	default 18
+
+menu "Power management options"
+
+source "drivers/acpi/Kconfig"
+
+endmenu
+
+source "drivers/firmware/Kconfig"
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
new file mode 100644
index 000000000..8d36aab53
--- /dev/null
+++ b/arch/loongarch/Kconfig.debug
@@ -0,0 +1,29 @@
+choice
+	prompt "Choose kernel unwinder"
+	default UNWINDER_PROLOGUE if KALLSYMS
+	help
+	  This determines which method will be used for unwinding kernel stack
+	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+	  lockdep, and more.
+
+config UNWINDER_GUESS
+	bool "Guess unwinder"
+	help
+	  This option enables the "guess" unwinder for unwinding kernel stack
+	  traces. It scans the stack and reports every kernel text address it
+	  finds. Some of the addresses it reports may be incorrect.
+
+	  While this option often produces false positives, it can still be
+	  useful in many cases.
+
+config UNWINDER_PROLOGUE
+	bool "Prologue unwinder"
+	depends on KALLSYMS
+	help
+	  This option enables the "prologue" unwinder for unwinding kernel stack
+	  traces. It unwinds the stack frame based on prologue code analysis;
+	  symbol information is needed, at least the address and length of each
+	  function. Some of the addresses it reports may be incorrect (but
+	  better than the Guess unwinder).
+
+endchoice
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
new file mode 100644
index 000000000..ed47a3a87
--- /dev/null
+++ b/arch/loongarch/Makefile
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Huacai Chen <chenhuacai@loongson.cn>
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+
+boot := arch/loongarch/boot
+
+KBUILD_DEFCONFIG := loongson3_defconfig
+
+image-name-y := vmlinux
+image-name-$(CONFIG_EFI_ZBOOT) := vmlinuz
+
+ifndef CONFIG_EFI_STUB
+KBUILD_IMAGE := $(boot)/vmlinux.elf
+else
+KBUILD_IMAGE := $(boot)/$(image-name-y).efi
+endif
+
+#
+# Select the object file format to substitute into the linker script.
+#
+64bit-tool-archpref = loongarch64
+32bit-bfd = elf32-loongarch
+64bit-bfd = elf64-loongarch
+32bit-emul = elf32loongarch
+64bit-emul = elf64loongarch
+
+ifdef CONFIG_64BIT
+tool-archpref = $(64bit-tool-archpref)
+UTS_MACHINE := loongarch64
+endif
+
+ifneq ($(SUBARCH),$(ARCH))
+  ifeq ($(CROSS_COMPILE),)
+    CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-)
+  endif
+endif
+
+ifdef CONFIG_64BIT
+ld-emul = $(64bit-emul)
+cflags-y += -mabi=lp64s
+endif
+
+cflags-y += -G0 -pipe -msoft-float
+LDFLAGS_vmlinux += -G0 -static -n -nostdlib
+
+# When the assembler supports explicit relocation hints, we must use them.
+# GCC may have -mexplicit-relocs off by default if it was built with an old
+# assembler, so we force it via an option.
+#
+# When the assembler does not support explicit relocation hints, we can't
+# use them. Disable the option if the compiler supports it.
+#
+# If you've seen an "unknown reloc hint" message while building the kernel
+# and are now wondering why "-mexplicit-relocs" is not wrapped with
+# cc-option: the combination of a "new" assembler and "old" compiler is not
+# supported. Either upgrade the compiler or downgrade the assembler.
+ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
+cflags-y += -mexplicit-relocs
+KBUILD_CFLAGS_KERNEL += -mdirect-extern-access
+else
+cflags-y += $(call cc-option,-mno-explicit-relocs)
+KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
+KBUILD_CFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
+KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
+KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
+endif
+
+cflags-y += -ffreestanding
+cflags-y += $(call cc-option, -mno-check-zero-division)
+
+ifndef CONFIG_PHYSICAL_START
+load-y = 0x9000000000200000
+else
+load-y = $(CONFIG_PHYSICAL_START)
+endif
+bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
+
+drivers-$(CONFIG_PCI) += arch/loongarch/pci/
+
+KBUILD_AFLAGS += $(cflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+
+# This is required to get dwarf unwinding tables into .debug_frame
+# instead of .eh_frame so we don't discard them.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
+# Don't emit unaligned accesses.
+# Not all LoongArch cores support unaligned access, and as kernel we can't
+# rely on others to provide emulation for these accesses.
+KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
+KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+
+KBUILD_LDFLAGS += -m $(ld-emul)
+
+ifdef CONFIG_LOONGARCH
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+	grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+endif
+
+libs-y += arch/loongarch/lib/
+libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+ifeq ($(KBUILD_EXTMOD),)
+prepare: vdso_prepare
+vdso_prepare: prepare0
+	$(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
+endif
+
+PHONY += vdso_install
+vdso_install:
+	$(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
+
+all: $(notdir $(KBUILD_IMAGE))
+
+vmlinuz.efi: vmlinux.efi
+
+vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
+
+install:
+	$(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/$(image-name-y)-$(KERNELRELEASE)
+	$(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+	$(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+	echo '  install    - install kernel into $(INSTALL_PATH)'
+	echo
+endef
diff --git a/arch/loongarch/boot/.gitignore b/arch/loongarch/boot/.gitignore
new file mode 100644
index 000000000..e5dc594dc
--- /dev/null
+++ b/arch/loongarch/boot/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux*
+vmlinuz*
diff --git a/arch/loongarch/boot/Makefile b/arch/loongarch/boot/Makefile
new file mode 100644
index 000000000..4e1c374c5
--- /dev/null
+++ b/arch/loongarch/boot/Makefile
@@ -0,0 +1,26 @@
+#
+# arch/loongarch/boot/Makefile
+#
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+#
+
+drop-sections := .comment .note .options .note.gnu.build-id
+strip-flags := $(addprefix --remove-section=,$(drop-sections)) -S
+OBJCOPYFLAGS_vmlinux.efi := -O binary $(strip-flags)
+
+quiet_cmd_strip = STRIP   $@
+      cmd_strip = $(STRIP) -s -o $@ $<
+
+targets := vmlinux.elf
+$(obj)/vmlinux.elf: vmlinux FORCE
+	$(call if_changed,strip)
+
+targets += vmlinux.efi
+$(obj)/vmlinux.efi: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+EFI_ZBOOT_PAYLOAD := vmlinux.efi
+EFI_ZBOOT_BFD_TARGET := elf64-loongarch
+EFI_ZBOOT_MACH_TYPE := LOONGARCH64
+
+include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
new file mode 100644
index 000000000..5f1f55e91
--- /dev/null
+++ b/arch/loongarch/boot/dts/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtstree := $(srctree)/$(src)
+
+dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
new file mode 100644
index 000000000..3540e9c0a
--- /dev/null
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -0,0 +1,850 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_USERFAULTFD=y +CONFIG_PERF_EVENTS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_NR_CPUS=64 +CONFIG_NUMA=y +CONFIG_KEXEC=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_250=y +CONFIG_ACPI=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=m +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_INET_ESP=m +CONFIG_INET_UDP_DIAG=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BBR=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_INET6_ESP=m +CONFIG_IPV6_MROUTE=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_TABLES=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m 
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BPFILTER=y +CONFIG_IP_SCTP=m +CONFIG_RDS=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_BPF=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m 
+CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_MTK=y +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_ATH3K=m +CONFIG_BT_VIRTIO=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB=m +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_IOV=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_PARPORT=y +CONFIG_PARPORT_PC=y +CONFIG_PARPORT_SERIAL=y +CONFIG_PARPORT_PC_FIFO=y +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_EEPROM_AT24=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT2SAS=y +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_PCMCIA=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=y +CONFIG_WIREGUARD=m +CONFIG_MACVLAN=m 
+CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_VXLAN=y +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IXGB=y +CONFIG_IXGBE=y +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_R8169=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_HTC=m +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_HOSTAP=m +CONFIG_MT7601U=m +CONFIG_RT2X00=m +CONFIG_RT2800USB=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +CONFIG_RTW88=m +CONFIG_RTW88_8822BE=m +CONFIG_RTW88_8822CE=m +CONFIG_RTW88_8723DE=m +CONFIG_RTW88_8821CE=m +CONFIG_RTW89=m +CONFIG_RTW89_8852AE=m +CONFIG_RTW89_8852CE=m +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_LEGACY_PTY_COUNT=16 
+CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_PRINTER=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_PIIX4=y +CONFIG_I2C_GPIO=y +CONFIG_SPI=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_LOONGSON=y +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83627HF=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_IR_IMON_DECODER=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_DRM=y +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_AST=y +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_LCD_PLATFORM=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +# CONFIG_SND_ISA is not set +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_USB_AUDIO=m +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_CHERRY=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_SUNPLUS=m +CONFIG_USB_HIDDEV=y +CONFIG_USB=y +CONFIG_USB_OTG=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=m +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_UAS=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_INFINIBAND=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y +CONFIG_DMADEVICES=y +CONFIG_UIO=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m 
+CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_R8188EU=m +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_PWM=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_BTRFS_FS=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_CIFS=m +# CONFIG_CIFS_DEBUG is not set +CONFIG_9P_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_PRINTK_TIME=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild new file mode 100644 index 000000000..77ad8e6f0 --- /dev/null +++ b/arch/loongarch/include/asm/Kbuild @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += dma-contiguous.h +generic-y += export.h +generic-y += mcs_spinlock.h +generic-y += parport.h +generic-y += early_ioremap.h +generic-y += qrwlock.h +generic-y += qspinlock.h +generic-y += rwsem.h +generic-y += segment.h +generic-y += user.h +generic-y += stat.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += mman.h +generic-y += msgbuf.h +generic-y 
+= sembuf.h
+generic-y += shmbuf.h
+generic-y += statfs.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termbits.h
+generic-y += poll.h
+generic-y += param.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
new file mode 100644
index 000000000..52f298f72
--- /dev/null
+++ b/arch/loongarch/include/asm/acenv.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific ACPICA environments and implementation
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACENV_H
+#define _ASM_LOONGARCH_ACENV_H
+
+/*
+ * This header is required by the ACPI core, but we have nothing to fill in
+ * right now. Will be updated later when needed.
+ */
+
+#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
new file mode 100644
index 000000000..17162f494
--- /dev/null
+++ b/arch/loongarch/include/asm/acpi.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACPI_H
+#define _ASM_LOONGARCH_ACPI_H
+
+#ifdef CONFIG_ACPI
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_noirq;
+
+#define acpi_os_ioremap acpi_os_ioremap
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+
+static inline void disable_acpi(void)
+{
+	acpi_disabled = 1;
+	acpi_pci_disabled = 1;
+	acpi_noirq = 1;
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+	return true;
+}
+
+extern struct list_head acpi_wakeup_device_list;
+
+/*
+ * Temporary definitions until the core ACPICA code gets updated (see
+ * 1656837932-18257-1-git-send-email-lvjianmin@loongson.cn and its
+ * follow-ups for the "rationale").
+ *
+ * Once the "legal reasons" are cleared and the code is merged, this can
+ * be dropped entirely.
+ */
+#if (ACPI_CA_VERSION == 0x20220331 && !defined(LOONGARCH_ACPICA_EXT))
+
+#define LOONGARCH_ACPICA_EXT	1
+
+#define ACPI_MADT_TYPE_CORE_PIC	17
+#define ACPI_MADT_TYPE_LIO_PIC	18
+#define ACPI_MADT_TYPE_HT_PIC	19
+#define ACPI_MADT_TYPE_EIO_PIC	20
+#define ACPI_MADT_TYPE_MSI_PIC	21
+#define ACPI_MADT_TYPE_BIO_PIC	22
+#define ACPI_MADT_TYPE_LPC_PIC	23
+
+/* Values for Version field above */
+
+enum acpi_madt_core_pic_version {
+	ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
+	ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
+	ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_lio_pic_version {
+	ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
+	ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
+	ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_eio_pic_version {
+	ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
+	ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
+	ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_ht_pic_version {
+	ACPI_MADT_HT_PIC_VERSION_NONE = 0,
+	ACPI_MADT_HT_PIC_VERSION_V1 = 1,
+	ACPI_MADT_HT_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_bio_pic_version {
+	ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
+	ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
+	ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_msi_pic_version {
+	ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
+	ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
+	ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+enum acpi_madt_lpc_pic_version {
+	ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
+	ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
+	ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
+};
+
+#pragma pack(1)
+
+/* Core Interrupt Controller */
+
+struct acpi_madt_core_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u32 processor_id;
+	u32 core_id;
+	u32 flags;
+};
+
+/* Legacy I/O Interrupt Controller */
+
+struct acpi_madt_lio_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u64 address;
+	u16 size;
+	u8 cascade[2];
+	u32 cascade_map[2];
+};
+
+/* Extend I/O Interrupt Controller */
+
+struct acpi_madt_eio_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u8 cascade;
+	u8 node;
+	u64 node_map;
+};
+
+/* HT Interrupt Controller */
+
+struct acpi_madt_ht_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u64 address;
+	u16 size;
+	u8 cascade[8];
+};
+
+/* Bridge I/O Interrupt Controller */
+
+struct acpi_madt_bio_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u64 address;
+	u16 size;
+	u16 id;
+	u16 gsi_base;
+};
+
+/* MSI Interrupt Controller */
+
+struct acpi_madt_msi_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u64 msg_address;
+	u32 start;
+	u32 count;
+};
+
+/* LPC Interrupt Controller */
+
+struct acpi_madt_lpc_pic {
+	struct acpi_subtable_header header;
+	u8 version;
+	u64 address;
+	u16 size;
+	u8 cascade;
+};
+
+#pragma pack()
+
+#endif
+
+#endif /* !CONFIG_ACPI */
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
+
+#endif /* _ASM_LOONGARCH_ACPI_H */
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
new file mode 100644
index 000000000..d342935e5
--- /dev/null
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <linux/const.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef __ASSEMBLY__
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET	_AC(0, UL)
+#endif
+extern unsigned long vm_map_base;
+#endif /* __ASSEMBLY__ */
+
+#ifndef IO_BASE
+#define IO_BASE		CSR_DMW0_BASE
+#endif
+
+#ifndef CACHE_BASE
+#define CACHE_BASE	CSR_DMW1_BASE
+#endif
+
+#ifndef UNCACHE_BASE
+#define UNCACHE_BASE	CSR_DMW0_BASE
+#endif
+
+#define DMW_PABITS	48
+#define TO_PHYS_MASK	((1ULL << DMW_PABITS) - 1)
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START	(_AC(1, UL) << _AC(DMW_PABITS, UL))
+#endif
+
+#define TO_PHYS(x)	(		((x) & TO_PHYS_MASK))
+#define TO_CACHE(x)	(CACHE_BASE	| ((x) & TO_PHYS_MASK))
+#define TO_UNCACHE(x)	(UNCACHE_BASE	| ((x) & TO_PHYS_MASK))
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET	(CACHE_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#define FIXADDR_TOP	((unsigned long)(long)(int)0xfffe0000)
+#endif
+
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#define _CONST64_(x)	x
+#else
+#define _ATYPE_		__PTRDIFF_TYPE__
+#define _ATYPE32_	int
+#define _ATYPE64_	__s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x)	x ## L
+#else
+#define _CONST64_(x)	x ## LL
+#endif
+#endif
+
+/*
+ * 32/64-bit LoongArch address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_	(_ATYPE_)(_ATYPE32_)	/* widen if necessary */
+#define _ACAST64_	(_ATYPE64_)		/* do _not_ narrow */
+#endif
+
+#ifdef CONFIG_32BIT
+
+#define UVRANGE		0x00000000
+#define KPRANGE0	0x80000000
+#define KPRANGE1	0xa0000000
+#define KVRANGE		0xc0000000
+
+#else
+
+#define XUVRANGE	_CONST64_(0x0000000000000000)
+#define XSPRANGE	_CONST64_(0x4000000000000000)
+#define XKPRANGE	_CONST64_(0x8000000000000000)
+#define XKVRANGE	_CONST64_(0xc000000000000000)
+
+#endif
+
+/*
+ * Returns the physical address of a KPRANGEx / XKPRANGE address
+ */
+#define PHYSADDR(a)	((_ACAST64_(a)) & TO_PHYS_MASK)
+
+/*
+ * On LoongArch, the I/O port mapping is as follows:
+ *
+ *               |         ....          |
+ *               |-----------------------|
+ *               | pci io ports(16K~32M) |
+ *               |-----------------------|
+ *               | isa io ports(0  ~16K) |
+ * PCI_IOBASE -> |-----------------------|
+ *               |         ....          |
+ */
+#define PCI_IOBASE	((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE	SZ_32M
+#define ISA_IOSIZE	SZ_16K
+#define IO_SPACE_LIMIT	(PCI_IOSIZE - 1)
+
+#endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/loongarch/include/asm/asm-offsets.h b/arch/loongarch/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d9ad88d29
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-offsets.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <generated/asm-offsets.h>
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..ed06d3997
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/uaccess.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
new file mode 100644
index 000000000..40eea6aa4
--- /dev/null
+++ b/arch/loongarch/include/asm/asm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some useful macros for LoongArch assembler code
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+/* LoongArch pref instruction. */
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define PREF(hint, addr, offs)		\
+	preld	hint, addr, offs;	\
+
+#define PREFX(hint, addr, index)	\
+	preldx	hint, addr, index;	\
+
+#else /* !CONFIG_CPU_HAS_PREFETCH */
+
+#define PREF(hint, addr, offs)
+#define PREFX(hint, addr, index)
+
+#endif /* !CONFIG_CPU_HAS_PREFETCH */
+
+/*
+ * Stack alignment
+ */
+#define STACK_ALIGN	~(0xf)
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifndef __loongarch64
+#define SZREG	4
+#else
+#define SZREG	8
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (SZREG == 4)
+#define REG_L		ld.w
+#define REG_S		st.w
+#define REG_ADD		add.w
+#define REG_SUB		sub.w
+#else /* SZREG == 8 */
+#define REG_L		ld.d
+#define REG_S		st.d
+#define REG_ADD		add.d
+#define REG_SUB		sub.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (__SIZEOF_INT__ == 4)
+#define INT_ADD		add.w
+#define INT_ADDI	addi.w
+#define INT_SUB		sub.w
+#define INT_L		ld.w
+#define INT_S		st.w
+#define INT_SLL		slli.w
+#define INT_SLLV	sll.w
+#define INT_SRL		srli.w
+#define INT_SRLV	srl.w
+#define INT_SRA		srai.w
+#define INT_SRAV	sra.w
+#endif
+
+#if (__SIZEOF_INT__ == 8)
+#define INT_ADD		add.d
+#define INT_ADDI	addi.d
+#define INT_SUB		sub.d
+#define INT_L		ld.d
+#define INT_S		st.d
+#define INT_SLL		slli.d
+#define INT_SLLV	sll.d
+#define INT_SRL		srli.d
+#define INT_SRLV	srl.d
+#define INT_SRA		srai.d
+#define INT_SRAV	sra.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */ +#if (__SIZEOF_LONG__ == 4) +#define LONG_ADD add.w +#define LONG_ADDI addi.w +#define LONG_SUB sub.w +#define LONG_L ld.w +#define LONG_S st.w +#define LONG_SLL slli.w +#define LONG_SLLV sll.w +#define LONG_SRL srli.w +#define LONG_SRLV srl.w +#define LONG_SRA srai.w +#define LONG_SRAV sra.w + +#ifdef __ASSEMBLY__ +#define LONG .word +#endif +#define LONGSIZE 4 +#define LONGMASK 3 +#define LONGLOG 2 +#endif + +#if (__SIZEOF_LONG__ == 8) +#define LONG_ADD add.d +#define LONG_ADDI addi.d +#define LONG_SUB sub.d +#define LONG_L ld.d +#define LONG_S st.d +#define LONG_SLL slli.d +#define LONG_SLLV sll.d +#define LONG_SRL srli.d +#define LONG_SRLV srl.d +#define LONG_SRA srai.d +#define LONG_SRAV sra.d + +#ifdef __ASSEMBLY__ +#define LONG .dword +#endif +#define LONGSIZE 8 +#define LONGMASK 7 +#define LONGLOG 3 +#endif + +/* + * How to add/sub/load/store/shift pointers. + */ +#if (__SIZEOF_POINTER__ == 4) +#define PTR_ADD add.w +#define PTR_ADDI addi.w +#define PTR_SUB sub.w +#define PTR_L ld.w +#define PTR_S st.w +#define PTR_LI li.w +#define PTR_SLL slli.w +#define PTR_SLLV sll.w +#define PTR_SRL srli.w +#define PTR_SRLV srl.w +#define PTR_SRA srai.w +#define PTR_SRAV sra.w + +#define PTR_SCALESHIFT 2 + +#ifdef __ASSEMBLY__ +#define PTR .word +#endif +#define PTRSIZE 4 +#define PTRLOG 2 +#endif + +#if (__SIZEOF_POINTER__ == 8) +#define PTR_ADD add.d +#define PTR_ADDI addi.d +#define PTR_SUB sub.d +#define PTR_L ld.d +#define PTR_S st.d +#define PTR_LI li.d +#define PTR_SLL slli.d +#define PTR_SLLV sll.d +#define PTR_SRL srli.d +#define PTR_SRLV srl.d +#define PTR_SRA srai.d +#define PTR_SRAV sra.d + +#define PTR_SCALESHIFT 3 + +#ifdef __ASSEMBLY__ +#define PTR .dword +#endif +#define PTRSIZE 8 +#define PTRLOG 3 +#endif + +#endif /* __ASM_ASM_H */ diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h new file mode 100644 index 000000000..be037a405 --- /dev/null +++ b/arch/loongarch/include/asm/asmmacro.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ASMMACRO_H +#define _ASM_ASMMACRO_H + +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/fpregdef.h> +#include <asm/loongarch.h> + + .macro parse_v var val + \var = \val + .endm + + .macro parse_r var r + \var = -1 + .ifc \r, $r0 + \var = 0 + .endif + .ifc \r, $r1 + \var = 1 + .endif + .ifc \r, $r2 + \var = 2 + .endif + .ifc \r, $r3 + \var = 3 + .endif + .ifc \r, $r4 + \var = 4 + .endif + .ifc \r, $r5 + \var = 5 + .endif + .ifc \r, $r6 + \var = 6 + .endif + .ifc \r, $r7 + \var = 7 + .endif + .ifc \r, $r8 + \var = 8 + .endif + .ifc \r, $r9 + \var = 9 + .endif + .ifc \r, $r10 + \var = 10 + .endif + .ifc \r, $r11 + \var = 11 + .endif + .ifc \r, $r12 + \var = 12 + .endif + .ifc \r, $r13 + \var = 13 + .endif + .ifc \r, $r14 + \var = 14 + .endif + .ifc \r, $r15 + \var = 15 + .endif + .ifc \r, $r16 + \var = 16 + .endif + .ifc \r, $r17 + \var = 17 + .endif + .ifc \r, $r18 + \var = 18 + .endif + .ifc \r, $r19 + \var = 19 + .endif + .ifc \r, $r20 + \var = 20 + .endif + .ifc \r, $r21 + \var = 21 + .endif + .ifc \r, $r22 + \var = 22 + .endif + .ifc \r, $r23 + \var = 23 + .endif + .ifc \r, $r24 + \var = 24 + .endif + .ifc \r, $r25 + \var = 25 + .endif + .ifc \r, $r26 + \var = 26 + .endif + .ifc \r, $r27 + \var = 27 + .endif + .ifc \r, $r28 + \var = 28 + .endif + .ifc \r, $r29 + \var = 29 + .endif + .ifc \r, $r30 + \var = 30 + .endif + .ifc \r, $r31 + \var = 31 + .endif + .iflt \var + .error "Unable 
to parse register name \r" + .endif + .endm + + .macro cpu_save_nonscratch thread + stptr.d s0, \thread, THREAD_REG23 + stptr.d s1, \thread, THREAD_REG24 + stptr.d s2, \thread, THREAD_REG25 + stptr.d s3, \thread, THREAD_REG26 + stptr.d s4, \thread, THREAD_REG27 + stptr.d s5, \thread, THREAD_REG28 + stptr.d s6, \thread, THREAD_REG29 + stptr.d s7, \thread, THREAD_REG30 + stptr.d s8, \thread, THREAD_REG31 + stptr.d sp, \thread, THREAD_REG03 + stptr.d fp, \thread, THREAD_REG22 + .endm + + .macro cpu_restore_nonscratch thread + ldptr.d s0, \thread, THREAD_REG23 + ldptr.d s1, \thread, THREAD_REG24 + ldptr.d s2, \thread, THREAD_REG25 + ldptr.d s3, \thread, THREAD_REG26 + ldptr.d s4, \thread, THREAD_REG27 + ldptr.d s5, \thread, THREAD_REG28 + ldptr.d s6, \thread, THREAD_REG29 + ldptr.d s7, \thread, THREAD_REG30 + ldptr.d s8, \thread, THREAD_REG31 + ldptr.d ra, \thread, THREAD_REG01 + ldptr.d sp, \thread, THREAD_REG03 + ldptr.d fp, \thread, THREAD_REG22 + .endm + + .macro fpu_save_csr thread tmp + movfcsr2gr \tmp, fcsr0 + stptr.w \tmp, \thread, THREAD_FCSR + .endm + + .macro fpu_restore_csr thread tmp + ldptr.w \tmp, \thread, THREAD_FCSR + movgr2fcsr fcsr0, \tmp + .endm + + .macro fpu_save_cc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.d \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.d \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.d \tmp1, \tmp0, 31, 24 + movcf2gr \tmp0, $fcc4 + bstrins.d \tmp1, \tmp0, 39, 32 + movcf2gr \tmp0, $fcc5 + bstrins.d \tmp1, \tmp0, 47, 40 + movcf2gr \tmp0, $fcc6 + bstrins.d \tmp1, \tmp0, 55, 48 + movcf2gr \tmp0, $fcc7 + bstrins.d \tmp1, \tmp0, 63, 56 + stptr.d \tmp1, \thread, THREAD_FCC + .endm + + .macro fpu_restore_cc thread tmp0 tmp1 + ldptr.d \tmp0, \thread, THREAD_FCC + bstrpick.d \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.d \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.d \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.d \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + bstrpick.d \tmp1, \tmp0, 39, 32 + movgr2cf $fcc4, \tmp1 + bstrpick.d \tmp1, \tmp0, 47, 40 + movgr2cf $fcc5, \tmp1 + bstrpick.d \tmp1, \tmp0, 55, 48 + movgr2cf $fcc6, \tmp1 + bstrpick.d \tmp1, \tmp0, 63, 56 + movgr2cf $fcc7, \tmp1 + .endm + + .macro fpu_save_double thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \tmp, \thread + fst.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0 + fst.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0 + fst.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0 + fst.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0 + fst.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0 + fst.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0 + fst.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0 + fst.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0 + fst.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0 + fst.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0 + fst.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0 + fst.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0 + fst.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0 + fst.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0 + fst.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0 + fst.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0 + fst.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0 + fst.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0 + fst.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0 + fst.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0 + fst.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0 + fst.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0 + fst.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0 + fst.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0 + fst.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0 + fst.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0 + 
fst.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0 + fst.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0 + fst.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0 + fst.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0 + fst.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0 + fst.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro fpu_restore_double thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \tmp, \thread + fld.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0 + fld.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0 + fld.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0 + fld.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0 + fld.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0 + fld.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0 + fld.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0 + fld.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0 + fld.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0 + fld.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0 + fld.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0 + fld.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0 + fld.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0 + fld.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0 + fld.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0 + fld.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0 + fld.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0 + fld.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0 + fld.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0 + fld.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0 + fld.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0 + fld.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0 + fld.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0 + fld.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0 + fld.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0 + fld.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0 + fld.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0 + fld.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0 + fld.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0 + fld.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0 + fld.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0 + fld.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + +.macro not dst src + nor \dst, \src, zero +.endm + +#endif /* _ASM_ASMMACRO_H */ diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h new file mode 100644 index 000000000..6b9aca9ab --- /dev/null +++ b/arch/loongarch/include/asm/atomic.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ATOMIC_H +#define _ASM_ATOMIC_H + +#include <linux/types.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> + +#if __SIZEOF_LONG__ == 4 +#define __LL "ll.w " +#define __SC "sc.w " +#define __AMADD "amadd.w " +#define __AMAND_DB "amand_db.w " +#define __AMOR_DB "amor_db.w " +#define __AMXOR_DB "amxor_db.w " +#elif __SIZEOF_LONG__ == 8 +#define __LL "ll.d " +#define __SC "sc.d " +#define __AMADD "amadd.d " +#define __AMAND_DB "amand_db.d " +#define __AMOR_DB "amor_db.d " +#define __AMXOR_DB "amxor_db.d " +#endif + +#define ATOMIC_INIT(i) { (i) } + +/* + * arch_atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#define arch_atomic_read(v) READ_ONCE((v)->counter) + +/* + * arch_atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. 
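An illustrative aside before the atomic generators: the fpu_save_cc macro earlier packs the eight one-bit condition-flag registers $fcc0..$fcc7 into a single 64-bit word, one byte per flag, using bstrins.d. A minimal C model of that layout (pack_fcc and its array argument are hypothetical names, not part of this patch):

static inline unsigned long pack_fcc(const unsigned char fcc[8])
{
	unsigned long packed = 0;
	int i;

	/* widen each 1-bit flag to a byte at bits [8*i+7 : 8*i] */
	for (i = 0; i < 8; i++)
		packed |= (unsigned long)(fcc[i] & 1) << (i * 8);
	return packed;
}

fpu_restore_cc inverts this with bstrpick.d, extracting one byte field back into each $fccN.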
+ */ +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op, I, asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(add, i, add, +) +ATOMIC_OPS(sub, -i, add, +) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: ll.w %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " add.w %[rc], %[p], %[a]\n" + " sc.w %[rc], %[c]\n" + " beqz %[rc], 0b\n" + " b 2f\n" + "1:\n" + __WEAK_LLSC_MB + "2:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), + [c]"=ZB" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + + return prev; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless + +/* + * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically test @v and subtract @i if @v is greater than or equal to @i. + * The function returns the old value of @v minus @i.
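To make the generators above concrete, this is roughly what ATOMIC_OP(add, i, add) expands to after preprocessing (hand-expanded here for illustration). The single amadd_db.w instruction performs the whole read-modify-write, discarding the old value into $zero:

static inline void arch_atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
	"amadd_db.w $zero, %1, %0	\n"
	: "+ZB" (v->counter)
	: "r" (i)
	: "memory");
}

Note that arch_atomic_sub is generated from the same amadd instruction by passing -i, as the ATOMIC_OPS(sub, -i, add, +) instantiation shows.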
+ */ +static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) +{ + int result; + int temp; + + if (__builtin_constant_p(i)) { + __asm__ __volatile__( + "1: ll.w %1, %2 # atomic_sub_if_positive\n" + " addi.w %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "I" (-i)); + } else { + __asm__ __volatile__( + "1: ll.w %1, %2 # atomic_sub_if_positive\n" + " sub.w %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "r" (i)); + } + + return result; +} + +#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new))) + +/* + * arch_atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + */ +#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v) + +#ifdef CONFIG_64BIT + +#define ATOMIC64_INIT(i) { (i) } + +/* + * arch_atomic64_read - read atomic variable + * @v: pointer of type atomic64_t + * + */ +#define arch_atomic64_read(v) READ_ONCE((v)->counter) + +/* + * arch_atomic64_set - set atomic variable + * @v: pointer of type atomic64_t + * @i: required value + */ +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +#define ATOMIC64_OP(op, I, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \ +{ \ + long result; \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC64_FETCH_OP(op, I, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC64_OPS(op, I, asm_op, c_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op) + +ATOMIC64_OPS(add, i, add, +) +ATOMIC64_OPS(sub, -i, add, +) + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + +#undef ATOMIC64_OPS + +#define ATOMIC64_OPS(op, I, asm_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op) + +ATOMIC64_OPS(and, i, and) +ATOMIC64_OPS(or, i, or) +ATOMIC64_OPS(xor, i, xor) + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long prev, rc; + + __asm__ __volatile__ ( + "0: ll.d %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " 
add.d %[rc], %[p], %[a]\n" + " sc.d %[rc], %[c]\n" + " beqz %[rc], 0b\n" + " b 2f\n" + "1:\n" + __WEAK_LLSC_MB + "2:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), + [c] "=ZB" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + + return prev; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless + +/* + * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically test @v and subtract @i if @v is greater than or equal to @i. + * The function returns the old value of @v minus @i. + */ +static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v) +{ + long result; + long temp; + + if (__builtin_constant_p(i)) { + __asm__ __volatile__( + "1: ll.d %1, %2 # atomic64_sub_if_positive \n" + " addi.d %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.d %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "I" (-i)); + } else { + __asm__ __volatile__( + "1: ll.d %1, %2 # atomic64_sub_if_positive \n" + " sub.d %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.d %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "r" (i)); + } + + return result; +} + +#define arch_atomic64_cmpxchg(v, o, n) \ + ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n))) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new))) + +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic64_t + */ +#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v) + +#endif /* CONFIG_64BIT */ + +#endif /* _ASM_ATOMIC_H */ diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h new file mode 100644 index 000000000..cda977675 --- /dev/null +++ b/arch/loongarch/include/asm/barrier.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#define __sync() __asm__ __volatile__("dbar 0" : : : "memory") + +#define fast_wmb() __sync() +#define fast_rmb() __sync() +#define fast_mb() __sync() +#define fast_iob() __sync() +#define wbflush() __sync() + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() fast_mb() +#define iob() fast_iob() + +#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory") +#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory") +#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory") + +#ifdef CONFIG_SMP +#define __WEAK_LLSC_MB " dbar 0 \n" +#else +#define __WEAK_LLSC_MB " \n" +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +/** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * Returns: + * 0 - (@index < @size) + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + __asm__ __volatile__( + "sltu %0, %1, %2\n\t" +#if (__SIZEOF_LONG__ == 4) + "sub.w %0, $zero, %0\n\t" +#elif (__SIZEOF_LONG__ == 8) + "sub.d %0, $zero, %0\n\t" +#endif + : "=r" (mask) + : "r" (index), "r" (size) + :); + + return mask; +} + +#define __smp_load_acquire(p) \ +({ \ + union {
typeof(*p) __val; char __c[1]; } __u; \ + unsigned long __tmp = 0; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + *(__u8 *)__u.__c = *(volatile __u8 *)p; \ + __smp_mb(); \ + break; \ + case 2: \ + *(__u16 *)__u.__c = *(volatile __u16 *)p; \ + __smp_mb(); \ + break; \ + case 4: \ + __asm__ __volatile__( \ + "amor_db.w %[val], %[tmp], %[mem] \n" \ + : [val] "=&r" (*(__u32 *)__u.__c) \ + : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "amor_db.d %[val], %[tmp], %[mem] \n" \ + : [val] "=&r" (*(__u64 *)__u.__c) \ + : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \ + : "memory"); \ + break; \ + } \ + (typeof(*p))__u.__val; \ +}) + +#define __smp_store_release(p, v) \ +do { \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + unsigned long __tmp; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + __smp_mb(); \ + *(volatile __u8 *)p = *(__u8 *)__u.__c; \ + break; \ + case 2: \ + __smp_mb(); \ + *(volatile __u16 *)p = *(__u16 *)__u.__c; \ + break; \ + case 4: \ + __asm__ __volatile__( \ + "amswap_db.w %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u32 *)__u.__c) \ + : ); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "amswap_db.d %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u64 *)__u.__c) \ + : ); \ + break; \ + } \ +} while (0) + +#define __smp_store_mb(p, v) \ +do { \ + union { typeof(p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(p)) (v) }; \ + unsigned long __tmp; \ + switch (sizeof(p)) { \ + case 1: \ + *(volatile __u8 *)&p = *(__u8 *)__u.__c; \ + __smp_mb(); \ + break; \ + case 2: \ + *(volatile __u16 *)&p = *(__u16 *)__u.__c; \ + __smp_mb(); \ + break; \ + case 4: \ + __asm__ __volatile__( \ + "amswap_db.w %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u32 *)__u.__c) \ + : ); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "amswap_db.d %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u64 *)__u.__c) \ + : ); \ + break; \ + } \ +} while (0) + +#include <asm-generic/barrier.h> + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h new file mode 100644 index 000000000..69e00f8d8 --- /dev/null +++ b/arch/loongarch/include/asm/bitops.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BITOPS_H +#define _ASM_BITOPS_H + +#include <linux/compiler.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm/barrier.h> + +#include <asm-generic/bitops/builtin-ffs.h> +#include <asm-generic/bitops/builtin-fls.h> +#include <asm-generic/bitops/builtin-__ffs.h> +#include <asm-generic/bitops/builtin-__fls.h> + +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls64.h> + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> + +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/lock.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* _ASM_BITOPS_H */ diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h new file mode 
100644 index 000000000..46f275b9c --- /dev/null +++ b/arch/loongarch/include/asm/bitrev.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __LOONGARCH_ASM_BITREV_H__ +#define __LOONGARCH_ASM_BITREV_H__ + +#include <linux/swab.h> + +static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) +{ + u32 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x))); + return ret; +} + +static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) +{ + u16 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab16(x))); + return ret; +} + +static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) +{ + u8 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(x)); + return ret; +} + +#endif /* __LOONGARCH_ASM_BITREV_H__ */ diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h new file mode 100644 index 000000000..ed0910e8b --- /dev/null +++ b/arch/loongarch/include/asm/bootinfo.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BOOTINFO_H +#define _ASM_BOOTINFO_H + +#include <linux/types.h> +#include <asm/setup.h> + +const char *get_system_type(void); + +extern void init_environ(void); +extern void memblock_init(void); +extern void platform_init(void); +extern void plat_swiotlb_setup(void); +extern int __init init_numa_memory(void); + +struct loongson_board_info { + int bios_size; + const char *bios_vendor; + const char *bios_version; + const char *bios_release_date; + const char *board_name; + const char *board_vendor; +}; + +struct loongson_system_configuration { + int nr_cpus; + int nr_nodes; + int boot_cpu_id; + int cores_per_node; + int cores_per_package; + unsigned long cores_io_master; + const char *cpuname; +}; + +extern u64 efi_system_table; +extern unsigned long fw_arg0, fw_arg1, fw_arg2; +extern struct loongson_board_info b_info; +extern struct loongson_system_configuration loongson_sysconf; + +static inline bool io_master(int cpu) +{ + return test_bit(cpu, &loongson_sysconf.cores_io_master); +} + +#endif /* _ASM_BOOTINFO_H */ diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h new file mode 100644 index 000000000..9a133e4c0 --- /dev/null +++ b/arch/loongarch/include/asm/branch.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BRANCH_H +#define _ASM_BRANCH_H + +#include <asm/ptrace.h> + +static inline unsigned long exception_era(struct pt_regs *regs) +{ + return regs->csr_era; +} + +static inline void compute_return_era(struct pt_regs *regs) +{ + regs->csr_era += 4; +} + +#endif /* _ASM_BRANCH_H */ diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h new file mode 100644 index 000000000..d4ca3ba25 --- /dev/null +++ b/arch/loongarch/include/asm/bug.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BUG_H +#define __ASM_BUG_H + +#include <asm/break.h> +#include <linux/stringify.h> + +#ifndef CONFIG_DEBUG_BUGVERBOSE +#define _BUGVERBOSE_LOCATION(file, line) +#else +#define __BUGVERBOSE_LOCATION(file, line) \ + .pushsection .rodata.str, "aMS", @progbits, 1; \ + 10002: .string file; \ + .popsection; \ + \ + .long 10002b - .; \ + .short line; +#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) +#endif + +#ifndef CONFIG_GENERIC_BUG 
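Returning to the bitrev helpers above: bitrev.4b reverses the bit order inside each of the four bytes independently, so byte-swapping first yields a full 32-bit reversal. A portable C model of that equivalence (a sketch, not part of this patch; the function names are hypothetical):

#include <stdint.h>

static uint32_t bitrev8_model(uint8_t b)
{
	uint32_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= ((b >> i) & 1u) << (7 - i);
	return r;
}

static uint32_t bitrev32_model(uint32_t x)
{
	uint32_t s = __builtin_bswap32(x);	/* models __swab32() */
	uint32_t r = 0;

	/* reverse bits within each byte, as bitrev.4b does */
	for (int i = 0; i < 4; i++)
		r |= bitrev8_model((s >> (8 * i)) & 0xff) << (8 * i);
	return r;
}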
+#define __BUG_ENTRY(flags) +#else +#define __BUG_ENTRY(flags) \ + .pushsection __bug_table, "aw"; \ + .align 2; \ + 10000: .long 10001f - .; \ + _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ + .short flags; \ + .popsection; \ + 10001: +#endif + +#define ASM_BUG_FLAGS(flags) \ + __BUG_ENTRY(flags) \ + break BRK_BUG + +#define ASM_BUG() ASM_BUG_FLAGS(0) + +#define __BUG_FLAGS(flags) \ + asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags))); + +#define __WARN_FLAGS(flags) \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ + instrumentation_end(); \ +} while (0) + +#define BUG() \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(0); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG + +#include <asm-generic/bug.h> + +#endif /* __ASM_BUG_H */ diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h new file mode 100644 index 000000000..1b6d09617 --- /dev/null +++ b/arch/loongarch/include/asm/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CACHE_H +#define _ASM_CACHE_H + +#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define __read_mostly __section(".data..read_mostly") + +#endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h new file mode 100644 index 000000000..0681788eb --- /dev/null +++ b/arch/loongarch/include/asm/cacheflush.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CACHEFLUSH_H +#define _ASM_CACHEFLUSH_H + +#include <linux/mm.h> +#include <asm/cpu-info.h> +#include <asm/cacheops.h> + +static inline bool cache_present(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRESENT; +} + +static inline bool cache_private(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRIVATE; +} + +static inline bool cache_inclusive(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_INCLUSIVE; +} + +static inline unsigned int cpu_last_level_cache_line_size(void) +{ + int cache_present = boot_cpu_data.cache_leaves_present; + + return boot_cpu_data.cache_leaves[cache_present - 1].linesz; +} + +asmlinkage void __flush_cache_all(void); +void local_flush_icache_range(unsigned long start, unsigned long end); + +#define flush_icache_range local_flush_icache_range +#define flush_icache_user_range local_flush_icache_range + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 + +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) +#define flush_icache_page(vma, page) do { } while (0) +#define flush_icache_user_page(vma, page, addr, len) do { } while (0) +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define cache_op(op, addr) \ + __asm__ __volatile__( \ + " cacop %0, %1 \n" \ + : \ + : "i" (op), "ZC" (*(unsigned char *)(addr))) + +static inline void flush_cache_line(int leaf, unsigned long addr) +{ + switch (leaf) { + case Cache_LEAF0: + cache_op(Index_Writeback_Inv_LEAF0, 
addr); + break; + case Cache_LEAF1: + cache_op(Index_Writeback_Inv_LEAF1, addr); + break; + case Cache_LEAF2: + cache_op(Index_Writeback_Inv_LEAF2, addr); + break; + case Cache_LEAF3: + cache_op(Index_Writeback_Inv_LEAF3, addr); + break; + case Cache_LEAF4: + cache_op(Index_Writeback_Inv_LEAF4, addr); + break; + case Cache_LEAF5: + cache_op(Index_Writeback_Inv_LEAF5, addr); + break; + default: + break; + } +} + +#include <asm-generic/cacheflush.h> + +#endif /* _ASM_CACHEFLUSH_H */ diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h new file mode 100644 index 000000000..0f4a86f8e --- /dev/null +++ b/arch/loongarch/include/asm/cacheops.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache operations for the cache instruction. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CACHEOPS_H +#define __ASM_CACHEOPS_H + +/* + * Most cache ops are split into a 3 bit field identifying the cache, and a 2 + * bit field identifying the cache operation. + */ +#define CacheOp_Cache 0x07 +#define CacheOp_Op 0x18 + +#define Cache_LEAF0 0x00 +#define Cache_LEAF1 0x01 +#define Cache_LEAF2 0x02 +#define Cache_LEAF3 0x03 +#define Cache_LEAF4 0x04 +#define Cache_LEAF5 0x05 + +#define Index_Invalidate 0x08 +#define Index_Writeback_Inv 0x08 +#define Hit_Invalidate 0x10 +#define Hit_Writeback_Inv 0x10 +#define CacheOp_User_Defined 0x18 + +#define Index_Writeback_Inv_LEAF0 (Cache_LEAF0 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF1 (Cache_LEAF1 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF2 (Cache_LEAF2 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF3 (Cache_LEAF3 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF4 (Cache_LEAF4 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF5 (Cache_LEAF5 | Index_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF0 (Cache_LEAF0 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF1 (Cache_LEAF1 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF2 (Cache_LEAF2 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF3 (Cache_LEAF3 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF4 (Cache_LEAF4 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF5 (Cache_LEAF5 | Hit_Writeback_Inv) + +#endif /* __ASM_CACHEOPS_H */ diff --git a/arch/loongarch/include/asm/clocksource.h b/arch/loongarch/include/asm/clocksource.h new file mode 100644 index 000000000..58e64aa05 --- /dev/null +++ b/arch/loongarch/include/asm/clocksource.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_CLOCKSOURCE_H +#define __ASM_CLOCKSOURCE_H + +#include <asm/vdso/clocksource.h> + +#endif /* __ASM_CLOCKSOURCE_H */ diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h new file mode 100644 index 000000000..ecfa6cf79 --- /dev/null +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CMPXCHG_H +#define __ASM_CMPXCHG_H + +#include <linux/bits.h> +#include <linux/build_bug.h> +#include <asm/barrier.h> + +#define __xchg_asm(amswap_db, m, val) \ +({ \ + __typeof(val) __ret; \ + \ + __asm__ __volatile__ ( \ + " "amswap_db" %1, %z2, %0 \n" \ + : "+ZB" (*m), "=&r" (__ret) \ + : "Jr" (val) \ + : "memory"); \ + \ + __ret; \ +}) + +static inline unsigned int __xchg_small(volatile 
void *ptr, unsigned int val, + unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask value to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + val &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * exchange within the naturally aligned 4 byte integer that includes + * it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. + */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z5 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift) + : "memory"); + + return (old32 & mask) >> shift; +} + +static __always_inline unsigned long +__xchg(volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + case 2: + return __xchg_small(ptr, x, size); + + case 4: + return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); + + case 8: + return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); + + default: + BUILD_BUG(); + } + + return 0; +} + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __res; \ + \ + __res = (__typeof__(*(ptr))) \ + __xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \ + \ + __res; \ +}) + +#define __cmpxchg_asm(ld, st, m, old, new) \ +({ \ + __typeof(old) __ret; \ + \ + __asm__ __volatile__( \ + "1: " ld " %0, %2 # __cmpxchg_asm \n" \ + " bne %0, %z3, 2f \n" \ + " move $t0, %z4 \n" \ + " " st " $t0, %1 \n" \ + " beqz $t0, 1b \n" \ + "2: \n" \ + __WEAK_LLSC_MB \ + : "=&r" (__ret), "=ZB"(*m) \ + : "ZB"(*m), "Jr" (old), "Jr" (new) \ + : "t0", "memory"); \ + \ + __ret; \ +}) + +static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old, + unsigned int new, unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask inputs to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + old &= mask; + new &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * compare & exchange within the naturally aligned 4 byte integer + * that includes it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + old <<= shift; + new <<= shift; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value.
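The shift and mask computed above are straightforward to model. An illustrative helper (hypothetical name, not in the header) deriving the parameters used by __xchg_small and __cmpxchg_small; since LoongArch is little-endian, a byte at (ptr & 3) == 2, for example, yields shift 16 and mask 0x00ff0000:

static void small_access_params(const volatile void *ptr, unsigned int size,
				unsigned int *shift, unsigned int *mask)
{
	/* size is 1 or 2 here, so the shift below cannot overflow */
	*mask = (1U << (size * 8)) - 1;			/* models GENMASK() */
	*shift = ((unsigned long)ptr & 0x3) * 8;	/* bit offset in the u32 */
	*mask <<= *shift;
}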
+ */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " and %1, %0, %z4 \n" + " bne %1, %z5, 2f \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z6 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + " b 3f \n" + "2: \n" + __WEAK_LLSC_MB + "3: \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new) + : "memory"); + + return (old32 & mask) >> shift; +} + +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) +{ + switch (size) { + case 1: + case 2: + return __cmpxchg_small(ptr, old, new, size); + + case 4: + return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr, + (u32)old, new); + + case 8: + return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr, + (u64)old, new); + + default: + BUILD_BUG(); + } + + return 0; +} + +#define arch_cmpxchg_local(ptr, old, new) \ + ((__typeof__(*(ptr))) \ + __cmpxchg((ptr), \ + (unsigned long)(__typeof__(*(ptr)))(old), \ + (unsigned long)(__typeof__(*(ptr)))(new), \ + sizeof(*(ptr)))) + +#define arch_cmpxchg(ptr, old, new) \ +({ \ + __typeof__(*(ptr)) __res; \ + \ + __res = arch_cmpxchg_local((ptr), (old), (new)); \ + \ + __res; \ +}) + +#ifdef CONFIG_64BIT +#define arch_cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ + }) + +#define arch_cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ + }) +#else +#include <asm-generic/cmpxchg-local.h> +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) +#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n)) +#endif + +#endif /* __ASM_CMPXCHG_H */ diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h new file mode 100644 index 000000000..f6177f133 --- /dev/null +++ b/arch/loongarch/include/asm/cpu-features.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + */ +#ifndef __ASM_CPU_FEATURES_H +#define __ASM_CPU_FEATURES_H + +#include <asm/cpu.h> +#include <asm/cpu-info.h> + +#define cpu_opt(opt) (cpu_data[0].options & (opt)) +#define cpu_has(feat) (cpu_data[0].options & BIT_ULL(feat)) + +#define cpu_has_loongarch (cpu_has_loongarch32 | cpu_has_loongarch64) +#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT) +#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) + +#ifdef CONFIG_32BIT +# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) +# define cpu_vabits 31 +# define cpu_pabits 31 +#endif + +#ifdef CONFIG_64BIT +# define cpu_has_64bits 1 +# define cpu_vabits cpu_data[0].vabits +# define cpu_pabits cpu_data[0].pabits +# define __NEED_ADDRBITS_PROBE +#endif + +/* + * SMP assumption: Options of CPU 0 are a superset of all processors. + * This is true for all known LoongArch systems. 
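A usage sketch for the cmpxchg interface defined in cmpxchg.h above (the function below is hypothetical; generic kernel code would reach it through cmpxchg()). It is the classic compare-and-swap retry loop, relying on arch_cmpxchg() returning the value it actually observed:

static inline unsigned int inc_unless_max(unsigned int *p)
{
	unsigned int old, new;

	do {
		old = READ_ONCE(*p);
		if (old == UINT_MAX)
			return old;	/* already saturated */
		new = old + 1;
		/* the store succeeds only if *p still equals old */
	} while (arch_cmpxchg(p, old, new) != old);

	return new;
}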
+ */ +#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG) +#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM) +#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL) +#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU) +#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX) +#define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX) +#define cpu_has_crc32 cpu_opt(LOONGARCH_CPU_CRC32) +#define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX) +#define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO) +#define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ) +#define cpu_has_lbt_x86 cpu_opt(LOONGARCH_CPU_LBT_X86) +#define cpu_has_lbt_arm cpu_opt(LOONGARCH_CPU_LBT_ARM) +#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS) +#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips) +#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR) +#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB) +#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH) +#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT) +#define cpu_has_csripi cpu_opt(LOONGARCH_CPU_CSRIPI) +#define cpu_has_extioi cpu_opt(LOONGARCH_CPU_EXTIOI) +#define cpu_has_prefetch cpu_opt(LOONGARCH_CPU_PREFETCH) +#define cpu_has_pmp cpu_opt(LOONGARCH_CPU_PMP) +#define cpu_has_perf cpu_opt(LOONGARCH_CPU_PMP) +#define cpu_has_scalefreq cpu_opt(LOONGARCH_CPU_SCALEFREQ) +#define cpu_has_flatmode cpu_opt(LOONGARCH_CPU_FLATMODE) +#define cpu_has_eiodecode cpu_opt(LOONGARCH_CPU_EIODECODE) +#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) +#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) + + +#endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h new file mode 100644 index 000000000..cd73a6f57 --- /dev/null +++ b/arch/loongarch/include/asm/cpu-info.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CPU_INFO_H +#define __ASM_CPU_INFO_H + +#include <linux/cache.h> +#include <linux/types.h> + +#include <asm/loongarch.h> + +/* cache_desc->flags */ +enum { + CACHE_PRESENT = (1 << 0), + CACHE_PRIVATE = (1 << 1), /* core private cache */ + CACHE_INCLUSIVE = (1 << 2), /* include the inner level caches */ +}; + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned char type; + unsigned char level; + unsigned short sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +#define CACHE_LEVEL_MAX 3 +#define CACHE_LEAVES_MAX 6 + +struct cpuinfo_loongarch { + u64 asid_cache; + unsigned long asid_mask; + + /* + * Capability and feature descriptor structure for LoongArch CPU + */ + unsigned long long options; + unsigned int processor_id; + unsigned int fpu_vers; + unsigned int fpu_csr0; + unsigned int fpu_mask; + unsigned int cputype; + int isa_level; + int tlbsize; + int tlbsizemtlb; + int tlbsizestlbsets; + int tlbsizestlbways; + int cache_leaves_present; /* number of cache_leaves[] elements */ + struct cache_desc cache_leaves[CACHE_LEAVES_MAX]; + int core; /* physical core number in package */ + int package;/* physical package number */ + int vabits; /* Virtual Address size in bits */ + int pabits; /* Physical Address size in bits */ + unsigned int ksave_mask; /* Usable KSave mask. 
*/ + unsigned int watch_dreg_count; /* Number of data breakpoints */ + unsigned int watch_ireg_count; /* Number of instruction breakpoints */ + unsigned int watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */ +} __aligned(SMP_CACHE_BYTES); + +extern struct cpuinfo_loongarch cpu_data[]; +#define boot_cpu_data cpu_data[0] +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] + +extern void cpu_probe(void); + +extern const char *__cpu_family[]; +extern const char *__cpu_full_name[]; +#define cpu_family_string() __cpu_family[raw_smp_processor_id()] +#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()] + +struct seq_file; +struct notifier_block; + +extern int register_proc_cpuinfo_notifier(struct notifier_block *nb); +extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v); + +#define proc_cpuinfo_notifier(fn, pri) \ +({ \ + static struct notifier_block fn##_nb = { \ + .notifier_call = fn, \ + .priority = pri \ + }; \ + \ + register_proc_cpuinfo_notifier(&fn##_nb); \ +}) + +struct proc_cpuinfo_notifier_args { + struct seq_file *m; + unsigned long n; +}; + +static inline bool cpus_are_siblings(int cpua, int cpub) +{ + struct cpuinfo_loongarch *infoa = &cpu_data[cpua]; + struct cpuinfo_loongarch *infob = &cpu_data[cpub]; + + if (infoa->package != infob->package) + return false; + + if (infoa->core != infob->core) + return false; + + return true; +} + +static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo) +{ + return cpuinfo->asid_mask; +} + +static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo, + unsigned long asid_mask) +{ + cpuinfo->asid_mask = asid_mask; +} + +#endif /* __ASM_CPU_INFO_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h new file mode 100644 index 000000000..927577055 --- /dev/null +++ b/arch/loongarch/include/asm/cpu.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * cpu.h: Values of the PRID register used to match up + * various LoongArch CPU types. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CPU_H +#define _ASM_CPU_H + +/* + * As described in LoongArch specs from Loongson Technology, the PRID register + * (CPUCFG.00) has the following layout: + * + * +---------------+----------------+------------+--------------------+ + * | Reserved | Company ID | Series ID | Product ID | + * +---------------+----------------+------------+--------------------+ + * 31 24 23 16 15 12 11 0 + */ + +/* + * Assigned Company values for bits 23:16 of the PRID register. + */ + +#define PRID_COMP_MASK 0xff0000 + +#define PRID_COMP_LOONGSON 0x140000 + +/* + * Assigned Series ID values for bits 15:12 of the PRID register. In order + * to detect a certain CPU type exactly, additional registers may eventually + * need to be examined. + */ + +#define PRID_SERIES_MASK 0xf000 + +#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */ +#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */ +#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */ +#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */ +#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */ + +/* + * Particular Product ID values for bits 11:0 of the PRID register.
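Putting the layout above to work, a hedged sketch (not part of the header) of matching a PRID value read from CPUCFG.00 against these masks; prid_is_la464 is a hypothetical helper name:

static inline int prid_is_la464(unsigned int prid)
{
	return (prid & PRID_COMP_MASK) == PRID_COMP_LOONGSON &&
	       (prid & PRID_SERIES_MASK) == PRID_SERIES_LA464;
}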
+ */ + +#define PRID_PRODUCT_MASK 0x0fff + +#if !defined(__ASSEMBLY__) + +enum cpu_type_enum { + CPU_UNKNOWN, + CPU_LOONGSON32, + CPU_LOONGSON64, + CPU_LAST +}; + +#endif /* !__ASSEMBLY */ + +/* + * ISA Level encodings + * + */ + +#define LOONGARCH_CPU_ISA_LA32R 0x00000001 +#define LOONGARCH_CPU_ISA_LA32S 0x00000002 +#define LOONGARCH_CPU_ISA_LA64 0x00000004 + +#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S) +#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64 + +/* + * CPU Option encodings + */ +#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */ +#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */ +#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */ +#define CPU_FEATURE_FPU 3 /* CPU has FPU */ +#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */ +#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */ +#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */ +#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */ +#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */ +#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */ +#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */ +#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */ +#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ +#define CPU_FEATURE_TLB 13 /* CPU has TLB */ +#define CPU_FEATURE_CSR 14 /* CPU has CSR */ +#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ +#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ +#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ +#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ +#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ +#define CPU_FEATURE_PMP 20 /* CPU has performance counter */ +#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */ +#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ +#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ +#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ +#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ + +#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) +#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) +#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL) +#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU) +#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX) +#define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX) +#define LOONGARCH_CPU_CRC32 BIT_ULL(CPU_FEATURE_CRC32) +#define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX) +#define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO) +#define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ) +#define LOONGARCH_CPU_LBT_X86 BIT_ULL(CPU_FEATURE_LBT_X86) +#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM) +#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS) +#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB) +#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR) +#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH) +#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT) +#define LOONGARCH_CPU_CSRIPI BIT_ULL(CPU_FEATURE_CSRIPI) +#define LOONGARCH_CPU_EXTIOI BIT_ULL(CPU_FEATURE_EXTIOI) +#define LOONGARCH_CPU_PREFETCH BIT_ULL(CPU_FEATURE_PREFETCH) +#define LOONGARCH_CPU_PMP BIT_ULL(CPU_FEATURE_PMP) +#define LOONGARCH_CPU_SCALEFREQ BIT_ULL(CPU_FEATURE_SCALEFREQ) +#define LOONGARCH_CPU_FLATMODE BIT_ULL(CPU_FEATURE_FLATMODE) +#define LOONGARCH_CPU_EIODECODE BIT_ULL(CPU_FEATURE_EIODECODE) +#define LOONGARCH_CPU_GUESTID
BIT_ULL(CPU_FEATURE_GUESTID) +#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) + +#endif /* _ASM_CPU_H */ diff --git a/arch/loongarch/include/asm/cpufeature.h b/arch/loongarch/include/asm/cpufeature.h new file mode 100644 index 000000000..4da22a8e6 --- /dev/null +++ b/arch/loongarch/include/asm/cpufeature.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPU feature definitions for module loading, used by + * module_cpu_feature_match(), see uapi/asm/hwcap.h for LoongArch CPU features. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_CPUFEATURE_H +#define __ASM_CPUFEATURE_H + +#include <uapi/asm/hwcap.h> +#include <asm/elf.h> + +#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) + +#define cpu_feature(x) ilog2(HWCAP_ ## x) + +static inline bool cpu_have_feature(unsigned int num) +{ + return elf_hwcap & (1UL << num); +} + +#endif /* __ASM_CPUFEATURE_H */ diff --git a/arch/loongarch/include/asm/delay.h b/arch/loongarch/include/asm/delay.h new file mode 100644 index 000000000..36d775191 --- /dev/null +++ b/arch/loongarch/include/asm/delay.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_DELAY_H +#define _ASM_DELAY_H + +#include <linux/param.h> + +extern void __delay(unsigned long cycles); +extern void __ndelay(unsigned long ns); +extern void __udelay(unsigned long us); + +#define ndelay(ns) __ndelay(ns) +#define udelay(us) __udelay(us) + +/* make sure "usecs *= ..." in udelay does not overflow. */ +#if HZ >= 1000 +#define MAX_UDELAY_MS 1 +#elif HZ <= 200 +#define MAX_UDELAY_MS 5 +#else +#define MAX_UDELAY_MS (1000 / HZ) +#endif + +#endif /* _ASM_DELAY_H */ diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h new file mode 100644 index 000000000..75ccd808a --- /dev/null +++ b/arch/loongarch/include/asm/dma-direct.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _LOONGARCH_DMA_DIRECT_H +#define _LOONGARCH_DMA_DIRECT_H + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); + +#endif /* _LOONGARCH_DMA_DIRECT_H */ diff --git a/arch/loongarch/include/asm/dma.h b/arch/loongarch/include/asm/dma.h new file mode 100644 index 000000000..1a8866319 --- /dev/null +++ b/arch/loongarch/include/asm/dma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_DMA_H +#define __ASM_DMA_H + +#define MAX_DMA_ADDRESS PAGE_OFFSET +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +#endif diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h new file mode 100644 index 000000000..605493417 --- /dev/null +++ b/arch/loongarch/include/asm/dmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_DMI_H +#define _ASM_DMI_H + +#include <linux/io.h> +#include <linux/memblock.h> + +#define dmi_early_remap(x, l) dmi_remap(x, l) +#define dmi_early_unmap(x, l) dmi_unmap(x) +#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE) + +static inline void *dmi_remap(u64 phys_addr, unsigned long size) +{ + return ((void *)TO_CACHE(phys_addr)); +} + +static inline void dmi_unmap(void *addr) +{ +} + +#endif /* _ASM_DMI_H */ diff --git
a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h new file mode 100644 index 000000000..174567b00 --- /dev/null +++ b/arch/loongarch/include/asm/efi.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_EFI_H +#define _ASM_LOONGARCH_EFI_H + +#include <linux/efi.h> + +void __init efi_init(void); +void __init efi_runtime_init(void); +void efifb_setup_from_dmi(struct screen_info *si, const char *opt); + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */ + +#define arch_efi_call_virt_setup() +#define arch_efi_call_virt_teardown() + +#define EFI_ALLOC_ALIGN SZ_64K +#define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE + +static inline struct screen_info *alloc_screen_info(void) +{ + return &screen_info; +} + +static inline void free_screen_info(struct screen_info *si) +{ +} + +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) +{ + return ULONG_MAX; +} + +#endif /* _ASM_LOONGARCH_EFI_H */ diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h new file mode 100644 index 000000000..f16bd4245 --- /dev/null +++ b/arch/loongarch/include/asm/elf.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ELF_H +#define _ASM_ELF_H + +#include <linux/auxvec.h> +#include <linux/fs.h> +#include <uapi/linux/elf.h> + +#include <asm/current.h> +#include <asm/vdso.h> + +/* The ABI of a file. */ +#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT 0x1 +#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT 0x2 +#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT 0x3 + +#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT 0x5 +#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT 0x6 +#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT 0x7 + +/* LoongArch relocation types used by the dynamic linker */ +#define R_LARCH_NONE 0 +#define R_LARCH_32 1 +#define R_LARCH_64 2 +#define R_LARCH_RELATIVE 3 +#define R_LARCH_COPY 4 +#define R_LARCH_JUMP_SLOT 5 +#define R_LARCH_TLS_DTPMOD32 6 +#define R_LARCH_TLS_DTPMOD64 7 +#define R_LARCH_TLS_DTPREL32 8 +#define R_LARCH_TLS_DTPREL64 9 +#define R_LARCH_TLS_TPREL32 10 +#define R_LARCH_TLS_TPREL64 11 +#define R_LARCH_IRELATIVE 12 +#define R_LARCH_MARK_LA 20 +#define R_LARCH_MARK_PCREL 21 +#define R_LARCH_SOP_PUSH_PCREL 22 +#define R_LARCH_SOP_PUSH_ABSOLUTE 23 +#define R_LARCH_SOP_PUSH_DUP 24 +#define R_LARCH_SOP_PUSH_GPREL 25 +#define R_LARCH_SOP_PUSH_TLS_TPREL 26 +#define R_LARCH_SOP_PUSH_TLS_GOT 27 +#define R_LARCH_SOP_PUSH_TLS_GD 28 +#define R_LARCH_SOP_PUSH_PLT_PCREL 29 +#define R_LARCH_SOP_ASSERT 30 +#define R_LARCH_SOP_NOT 31 +#define R_LARCH_SOP_SUB 32 +#define R_LARCH_SOP_SL 33 +#define R_LARCH_SOP_SR 34 +#define R_LARCH_SOP_ADD 35 +#define R_LARCH_SOP_AND 36 +#define R_LARCH_SOP_IF_ELSE 37 +#define R_LARCH_SOP_POP_32_S_10_5 38 +#define R_LARCH_SOP_POP_32_U_10_12 39 +#define R_LARCH_SOP_POP_32_S_10_12 40 +#define R_LARCH_SOP_POP_32_S_10_16 41 +#define R_LARCH_SOP_POP_32_S_10_16_S2 42 +#define R_LARCH_SOP_POP_32_S_5_20 43 +#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2 44 +#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2 45 +#define R_LARCH_SOP_POP_32_U 46 +#define R_LARCH_ADD8 47 +#define R_LARCH_ADD16 48 +#define R_LARCH_ADD24 49 +#define R_LARCH_ADD32 50 +#define R_LARCH_ADD64 51 +#define R_LARCH_SUB8 52 +#define R_LARCH_SUB16 53 +#define R_LARCH_SUB24 54 +#define R_LARCH_SUB32 55 +#define R_LARCH_SUB64 56 +#define R_LARCH_GNU_VTINHERIT 57 +#define 
R_LARCH_GNU_VTENTRY 58 +#define R_LARCH_B16 64 +#define R_LARCH_B21 65 +#define R_LARCH_B26 66 +#define R_LARCH_ABS_HI20 67 +#define R_LARCH_ABS_LO12 68 +#define R_LARCH_ABS64_LO20 69 +#define R_LARCH_ABS64_HI12 70 +#define R_LARCH_PCALA_HI20 71 +#define R_LARCH_PCALA_LO12 72 +#define R_LARCH_PCALA64_LO20 73 +#define R_LARCH_PCALA64_HI12 74 +#define R_LARCH_GOT_PC_HI20 75 +#define R_LARCH_GOT_PC_LO12 76 +#define R_LARCH_GOT64_PC_LO20 77 +#define R_LARCH_GOT64_PC_HI12 78 +#define R_LARCH_GOT_HI20 79 +#define R_LARCH_GOT_LO12 80 +#define R_LARCH_GOT64_LO20 81 +#define R_LARCH_GOT64_HI12 82 +#define R_LARCH_TLS_LE_HI20 83 +#define R_LARCH_TLS_LE_LO12 84 +#define R_LARCH_TLS_LE64_LO20 85 +#define R_LARCH_TLS_LE64_HI12 86 +#define R_LARCH_TLS_IE_PC_HI20 87 +#define R_LARCH_TLS_IE_PC_LO12 88 +#define R_LARCH_TLS_IE64_PC_LO20 89 +#define R_LARCH_TLS_IE64_PC_HI12 90 +#define R_LARCH_TLS_IE_HI20 91 +#define R_LARCH_TLS_IE_LO12 92 +#define R_LARCH_TLS_IE64_LO20 93 +#define R_LARCH_TLS_IE64_HI12 94 +#define R_LARCH_TLS_LD_PC_HI20 95 +#define R_LARCH_TLS_LD_HI20 96 +#define R_LARCH_TLS_GD_PC_HI20 97 +#define R_LARCH_TLS_GD_HI20 98 +#define R_LARCH_32_PCREL 99 +#define R_LARCH_RELAX 100 +#define R_LARCH_DELETE 101 +#define R_LARCH_ALIGN 102 +#define R_LARCH_PCREL20_S2 103 +#define R_LARCH_CFA 104 +#define R_LARCH_ADD6 105 +#define R_LARCH_SUB6 106 +#define R_LARCH_ADD_ULEB128 107 +#define R_LARCH_SUB_ULEB128 108 +#define R_LARCH_64_PCREL 109 + +#ifndef ELF_ARCH + +/* ELF register definitions */ + +/* + * The general purpose register set contains the following registers: + * Register Number + * GPRs 32 + * ORIG_A0 1 + * ERA 1 + * BADVADDR 1 + * CRMD 1 + * PRMD 1 + * EUEN 1 + * ECFG 1 + * ESTAT 1 + * Reserved 5 + */ +#define ELF_NGREG 45 + +/* + * The floating point register set contains the following registers: + * Register Number + * FPR 32 + * FCC 1 + * FCSR 1 + */ +#define ELF_NFPREG 34 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs); + +#ifdef CONFIG_32BIT +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch elf32_check_arch + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +#define ELF_CORE_COPY_REGS(dest, regs) \ + loongarch_dump_regs32((u32 *)&(dest), (regs)); + +#endif /* CONFIG_32BIT */ + +#ifdef CONFIG_64BIT +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch elf64_check_arch + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 + +#define ELF_CORE_COPY_REGS(dest, regs) \ + loongarch_dump_regs64((u64 *)&(dest), (regs)); + +#endif /* CONFIG_64BIT */ + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_LOONGARCH + +#endif /* !defined(ELF_ARCH) */ + +#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH) + +#define vmcore_elf32_check_arch loongarch_elf_check_machine +#define vmcore_elf64_check_arch loongarch_elf_check_machine + +/* + * Return non-zero if HDR identifies a 32-bit ELF binary.
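The ELF_NGREG and ELF_NFPREG counts above follow directly from the tables in their comments: 32 GPRs plus eight named CSR/auxiliary slots plus five reserved slots, and 32 FPRs plus FCC plus FCSR. As a compile-time sanity sketch (hypothetical, not in the header):

_Static_assert(32 + 8 + 5 == ELF_NGREG, "GPR block layout");
_Static_assert(32 + 1 + 1 == ELF_NFPREG, "FPR block layout");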
+ */ +#define elf32_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (!loongarch_elf_check_machine(__h)) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + \ + __res; \ +}) + +/* + * Return non-zero if HDR identifies a 64-bit ELF binary. + */ +#define elf64_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (!loongarch_elf_check_machine(__h)) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS64) \ + __res = 0; \ + \ + __res; \ +}) + +#ifdef CONFIG_32BIT + +#define SET_PERSONALITY2(ex, state) \ +do { \ + current->thread.vdso = &vdso_info; \ + \ + if (personality(current->personality) != PER_LINUX) \ + set_personality(PER_LINUX); \ +} while (0) + +#endif /* CONFIG_32BIT */ + +#ifdef CONFIG_64BIT + +#define SET_PERSONALITY2(ex, state) \ +do { \ + unsigned int p; \ + \ + clear_thread_flag(TIF_32BIT_REGS); \ + clear_thread_flag(TIF_32BIT_ADDR); \ + \ + current->thread.vdso = &vdso_info; \ + \ + p = personality(current->personality); \ + if (p != PER_LINUX32 && p != PER_LINUX) \ + set_personality(PER_LINUX); \ +} while (0) + +#endif /* CONFIG_64BIT */ + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. This could be done in userspace, + * but it's not easy, and we've already done it here. + */ + +#define ELF_HWCAP (elf_hwcap) +extern unsigned int elf_hwcap; +#include <asm/hwcap.h> + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ + +#define ELF_PLATFORM __elf_platform +extern const char *__elf_platform; + +#define ELF_PLAT_INIT(_r, load_addr) do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[29] = _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. 
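+ * + * A worked example (illustrative only; TASK_SIZE is defined elsewhere and depends on the configured VA size): assuming TASK_SIZE = 1UL << 47, ELF_ET_DYN_BASE = TASK_SIZE / 3 * 2 evaluates with integer division to 0x555555555554, i.e. roughly two thirds of the way up the user address space.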
+ */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso); \ +} while (0) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +struct arch_elf_state { + int fp_abi; + int interp_fp_abi; +}; + +#define LOONGARCH_ABI_FP_ANY (0) + +#define INIT_ARCH_ELF_STATE { \ + .fp_abi = LOONGARCH_ABI_FP_ANY, \ + .interp_fp_abi = LOONGARCH_ABI_FP_ANY, \ +} + +extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, + bool is_interp, struct arch_elf_state *state); + +extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr, + struct arch_elf_state *state); + +#endif /* _ASM_ELF_H */ diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h new file mode 100644 index 000000000..0fe2a098d --- /dev/null +++ b/arch/loongarch/include/asm/entry-common.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H +#define ARCH_LOONGARCH_ENTRY_COMMON_H + +#include <linux/sched.h> +#include <linux/processor.h> + +static inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + +#endif diff --git a/arch/loongarch/include/asm/exec.h b/arch/loongarch/include/asm/exec.h new file mode 100644 index 000000000..ba0220812 --- /dev/null +++ b/arch/loongarch/include/asm/exec.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_EXEC_H */ diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h new file mode 100644 index 000000000..3116bde87 --- /dev/null +++ b/arch/loongarch/include/asm/fb.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ + +#include <linux/fb.h> +#include <linux/fs.h> +#include <asm/page.h> + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +} + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h new file mode 100644 index 000000000..d2e55ae55 --- /dev/null +++ b/arch/loongarch/include/asm/fixmap.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fixmap.h: compile-time virtual memory allocation + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#define NR_FIX_BTMAPS 64 + +enum fixed_addresses { + FIX_HOLE, + FIX_EARLYCON_MEM_BASE, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO PAGE_KERNEL_SUC + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#include <asm-generic/fixmap.h> + +#endif diff --git a/arch/loongarch/include/asm/fpregdef.h b/arch/loongarch/include/asm/fpregdef.h new file mode 
100644 index 000000000..b6be52783 --- /dev/null +++ b/arch/loongarch/include/asm/fpregdef.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for the FPU register names + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FPREGDEF_H +#define _ASM_FPREGDEF_H + +#define fa0 $f0 /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */ +#define fa1 $f1 +#define fa2 $f2 +#define fa3 $f3 +#define fa4 $f4 +#define fa5 $f5 +#define fa6 $f6 +#define fa7 $f7 +#define ft0 $f8 /* caller saved */ +#define ft1 $f9 +#define ft2 $f10 +#define ft3 $f11 +#define ft4 $f12 +#define ft5 $f13 +#define ft6 $f14 +#define ft7 $f15 +#define ft8 $f16 +#define ft9 $f17 +#define ft10 $f18 +#define ft11 $f19 +#define ft12 $f20 +#define ft13 $f21 +#define ft14 $f22 +#define ft15 $f23 +#define fs0 $f24 /* callee saved */ +#define fs1 $f25 +#define fs2 $f26 +#define fs3 $f27 +#define fs4 $f28 +#define fs5 $f29 +#define fs6 $f30 +#define fs7 $f31 + +/* + * Current binutils expects *GPRs* at FCSR position for the FCSR + * operation instructions, so define aliases for those used. + */ +#define fcsr0 $r0 +#define fcsr1 $r1 +#define fcsr2 $r2 +#define fcsr3 $r3 + +#endif /* _ASM_FPREGDEF_H */ diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h new file mode 100644 index 000000000..358b254d9 --- /dev/null +++ b/arch/loongarch/include/asm/fpu.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FPU_H +#define _ASM_FPU_H + +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/ptrace.h> +#include <linux/thread_info.h> +#include <linux/bitops.h> + +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/current.h> +#include <asm/loongarch.h> +#include <asm/processor.h> +#include <asm/ptrace.h> + +struct sigcontext; + +extern void _init_fpu(unsigned int); +extern void _save_fp(struct loongarch_fpu *); +extern void _restore_fp(struct loongarch_fpu *); + +/* + * Mask the FCSR Cause bits according to the Enable bits, observing + * that Unimplemented is always enabled. + */ +static inline unsigned long mask_fcsr_x(unsigned long fcsr) +{ + return fcsr & ((fcsr & FPU_CSR_ALL_E) << + (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))); +} + +static inline int is_fp_enabled(void) +{ + return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ? 
+ 1 : 0; +} + +#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN) + +#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN) + +#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) + +static inline int is_fpu_owner(void) +{ + return test_thread_flag(TIF_USEDFPU); +} + +static inline void __own_fpu(void) +{ + enable_fpu(); + set_thread_flag(TIF_USEDFPU); + KSTK_EUEN(current) |= CSR_EUEN_FPEN; +} + +static inline void own_fpu_inatomic(int restore) +{ + if (cpu_has_fpu && !is_fpu_owner()) { + __own_fpu(); + if (restore) + _restore_fp(&current->thread.fpu); + } +} + +static inline void own_fpu(int restore) +{ + preempt_disable(); + own_fpu_inatomic(restore); + preempt_enable(); +} + +static inline void lose_fpu_inatomic(int save, struct task_struct *tsk) +{ + if (is_fpu_owner()) { + if (save) + _save_fp(&tsk->thread.fpu); + disable_fpu(); + clear_tsk_thread_flag(tsk, TIF_USEDFPU); + } + KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); +} + +static inline void lose_fpu(int save) +{ + preempt_disable(); + lose_fpu_inatomic(save, current); + preempt_enable(); +} + +static inline void init_fpu(void) +{ + unsigned int fcsr = current->thread.fpu.fcsr; + + __own_fpu(); + _init_fpu(fcsr); + set_used_math(); +} + +static inline void save_fp(struct task_struct *tsk) +{ + if (cpu_has_fpu) + _save_fp(&tsk->thread.fpu); +} + +static inline void restore_fp(struct task_struct *tsk) +{ + if (cpu_has_fpu) + _restore_fp(&tsk->thread.fpu); +} + +static inline union fpureg *get_fpu_regs(struct task_struct *tsk) +{ + if (tsk == current) { + preempt_disable(); + if (is_fpu_owner()) + _save_fp(&current->thread.fpu); + preempt_enable(); + } + + return tsk->thread.fpu.fpr; +} + +#endif /* _ASM_FPU_H */ diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h new file mode 100644 index 000000000..feb6658c8 --- /dev/null +++ b/arch/loongarch/include/asm/futex.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/barrier.h> +#include <asm/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + __asm__ __volatile__( \ + "1: ll.w %1, %4 # __futex_atomic_op\n" \ + " " insn " \n" \ + "2: sc.w $t0, %2 \n" \ + " beqz $t0, 1b \n" \ + "3: \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li.w %0, %6 \n" \ + " b 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 4b \n" \ + " "__UA_ADDR "\t2b, 4b \n" \ + " .previous \n" \ + : "=r" (ret), "=&r" (oldval), \ + "=ZC" (*uaddr) \ + : "0" (0), "ZC" (*uaddr), "Jr" (oparg), \ + "i" (-EFAULT) \ + : "memory", "t0"); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int 
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) +{ + int ret = 0; + u32 val = 0; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__( + "# futex_atomic_cmpxchg_inatomic \n" + "1: ll.w %1, %3 \n" + " bne %1, %z4, 3f \n" + " move $t0, %z5 \n" + "2: sc.w $t0, %2 \n" + " beqz $t0, 1b \n" + "3: \n" + __WEAK_LLSC_MB + " .section .fixup,\"ax\" \n" + "4: li.d %0, %6 \n" + " b 3b \n" + " .previous \n" + " .section __ex_table,\"a\" \n" + " "__UA_ADDR "\t1b, 4b \n" + " "__UA_ADDR "\t2b, 4b \n" + " .previous \n" + : "+r" (ret), "=&r" (val), "=ZC" (*uaddr) + : "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval), + "i" (-EFAULT) + : "memory", "t0"); + + *uval = val; + + return ret; +} + +#endif /* _ASM_FUTEX_H */ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h new file mode 100644 index 000000000..0ef3b18f8 --- /dev/null +++ b/arch/loongarch/include/asm/hardirq.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_HARDIRQ_H +#define _ASM_HARDIRQ_H + +#include <linux/cache.h> +#include <linux/threads.h> +#include <linux/irq.h> + +extern void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#define NR_IPI 2 + +typedef struct { + unsigned int ipi_irqs[NR_IPI]; + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define __ARCH_IRQ_STAT + +#endif /* _ASM_HARDIRQ_H */ diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h new file mode 100644 index 000000000..aa44b3fe4 --- /dev/null +++ b/arch/loongarch/include/asm/hugetlb.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_HUGETLB_H +#define __ASM_HUGETLB_H + +#include <asm/page.h> + +uint64_t pmd_to_entrylo(unsigned long pmd_val); + +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, + unsigned long len) +{ + unsigned long task_size = STACK_TOP; + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + if (task_size - len < addr) + return -EINVAL; + return 0; +} + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t clear; + pte_t pte = *ptep; + + pte_val(clear) = (unsigned long)invalid_pte_table; + set_pte_at(mm, addr, ptep, clear); + return pte; +} + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t pte; + + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + flush_tlb_page(vma, addr); + return pte; +} + +#define __HAVE_ARCH_HUGE_PTE_NONE +static inline int huge_pte_none(pte_t pte) +{ + unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL; + return !val || (val == (unsigned long)invalid_pte_table); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, + pte_t *ptep, pte_t pte, + int dirty) +{ + int changed = !pte_same(*ptep, pte); + + if (changed) { + set_pte_at(vma->vm_mm, addr, ptep, pte); + /* + * There could be some 
standard sized pages in there, + * get them all. + */ + flush_tlb_range(vma, addr, addr + HPAGE_SIZE); + } + return changed; +} + +#include <asm-generic/hugetlb.h> + +#endif /* __ASM_HUGETLB_H */ diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h new file mode 100644 index 000000000..af4f4e8fb --- /dev/null +++ b/arch/loongarch/include/asm/hw_irq.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_HW_IRQ_H +#define __ASM_HW_IRQ_H + +#include <linux/atomic.h> + +extern atomic_t irq_err_count; + +/* + * interrupt-retrigger: NOP for now. This may not be appropriate for all + * machines, we'll see ... + */ + +#endif /* __ASM_HW_IRQ_H */ diff --git a/arch/loongarch/include/asm/idle.h b/arch/loongarch/include/asm/idle.h new file mode 100644 index 000000000..f7f2b7dbf --- /dev/null +++ b/arch/loongarch/include/asm/idle.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_IDLE_H +#define __ASM_IDLE_H + +#include <linux/linkage.h> + +extern asmlinkage void __arch_cpu_idle(void); + +#endif /* __ASM_IDLE_H */ diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h new file mode 100644 index 000000000..fce1843ce --- /dev/null +++ b/arch/loongarch/include/asm/inst.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_INST_H +#define _ASM_INST_H + +#include <linux/types.h> +#include <asm/asm.h> + +#define INSN_BREAK 0x002a0000 + +#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 +#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 +#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000 + +#define ADDR_IMMSHIFT_LU52ID 52 +#define ADDR_IMMSHIFT_LU32ID 32 +#define ADDR_IMMSHIFT_ADDU16ID 16 + +#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN) + +enum reg0i26_op { + b_op = 0x14, + bl_op = 0x15, +}; + +enum reg1i20_op { + lu12iw_op = 0x0a, + lu32id_op = 0x0b, + pcaddu12i_op = 0x0e, + pcaddu18i_op = 0x0f, +}; + +enum reg1i21_op { + beqz_op = 0x10, + bnez_op = 0x11, +}; + +enum reg2_op { + revb2h_op = 0x0c, + revb4h_op = 0x0d, + revb2w_op = 0x0e, + revbd_op = 0x0f, + revh2w_op = 0x10, + revhd_op = 0x11, +}; + +enum reg2i5_op { + slliw_op = 0x81, + srliw_op = 0x89, + sraiw_op = 0x91, +}; + +enum reg2i6_op { + sllid_op = 0x41, + srlid_op = 0x45, + sraid_op = 0x49, +}; + +enum reg2i12_op { + addiw_op = 0x0a, + addid_op = 0x0b, + lu52id_op = 0x0c, + andi_op = 0x0d, + ori_op = 0x0e, + xori_op = 0x0f, + ldb_op = 0xa0, + ldh_op = 0xa1, + ldw_op = 0xa2, + ldd_op = 0xa3, + stb_op = 0xa4, + sth_op = 0xa5, + stw_op = 0xa6, + std_op = 0xa7, + ldbu_op = 0xa8, + ldhu_op = 0xa9, + ldwu_op = 0xaa, +}; + +enum reg2i14_op { + llw_op = 0x20, + scw_op = 0x21, + lld_op = 0x22, + scd_op = 0x23, + ldptrw_op = 0x24, + stptrw_op = 0x25, + ldptrd_op = 0x26, + stptrd_op = 0x27, +}; + +enum reg2i16_op { + jirl_op = 0x13, + beq_op = 0x16, + bne_op = 0x17, + blt_op = 0x18, + bge_op = 0x19, + bltu_op = 0x1a, + bgeu_op = 0x1b, +}; + +enum reg2bstrd_op { + bstrinsd_op = 0x2, + bstrpickd_op = 0x3, +}; + +enum reg3_op { + addw_op = 0x20, + addd_op = 0x21, + subw_op = 0x22, + subd_op = 0x23, + nor_op = 0x28, + and_op = 0x29, + or_op = 0x2a, + xor_op = 0x2b, + orn_op = 0x2c, + andn_op = 0x2d, + sllw_op = 0x2e, + srlw_op = 0x2f, + sraw_op = 0x30, + slld_op = 0x31, + srld_op = 0x32, + srad_op = 0x33, + mulw_op = 0x38, + mulhw_op = 0x39, + mulhwu_op = 0x3a, 
+ muld_op = 0x3b, + mulhd_op = 0x3c, + mulhdu_op = 0x3d, + divw_op = 0x40, + modw_op = 0x41, + divwu_op = 0x42, + modwu_op = 0x43, + divd_op = 0x44, + modd_op = 0x45, + divdu_op = 0x46, + moddu_op = 0x47, + ldxb_op = 0x7000, + ldxh_op = 0x7008, + ldxw_op = 0x7010, + ldxd_op = 0x7018, + stxb_op = 0x7020, + stxh_op = 0x7028, + stxw_op = 0x7030, + stxd_op = 0x7038, + ldxbu_op = 0x7040, + ldxhu_op = 0x7048, + ldxwu_op = 0x7050, + amswapw_op = 0x70c0, + amswapd_op = 0x70c1, + amaddw_op = 0x70c2, + amaddd_op = 0x70c3, + amandw_op = 0x70c4, + amandd_op = 0x70c5, + amorw_op = 0x70c6, + amord_op = 0x70c7, + amxorw_op = 0x70c8, + amxord_op = 0x70c9, +}; + +enum reg3sa2_op { + alslw_op = 0x02, + alslwu_op = 0x03, + alsld_op = 0x16, +}; + +struct reg0i26_format { + unsigned int immediate_h : 10; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg1i20_format { + unsigned int rd : 5; + unsigned int immediate : 20; + unsigned int opcode : 7; +}; + +struct reg1i21_format { + unsigned int immediate_h : 5; + unsigned int rj : 5; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int opcode : 22; +}; + +struct reg2i5_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 5; + unsigned int opcode : 17; +}; + +struct reg2i6_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 6; + unsigned int opcode : 16; +}; + +struct reg2i12_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 12; + unsigned int opcode : 10; +}; + +struct reg2i14_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 14; + unsigned int opcode : 8; +}; + +struct reg2i16_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 16; + unsigned int opcode : 6; +}; + +struct reg2bstrd_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int lsbd : 6; + unsigned int msbd : 6; + unsigned int opcode : 10; +}; + +struct reg3_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int opcode : 17; +}; + +struct reg3sa2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int immediate : 2; + unsigned int opcode : 15; +}; + +union loongarch_instruction { + unsigned int word; + struct reg0i26_format reg0i26_format; + struct reg1i20_format reg1i20_format; + struct reg1i21_format reg1i21_format; + struct reg2_format reg2_format; + struct reg2i5_format reg2i5_format; + struct reg2i6_format reg2i6_format; + struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; + struct reg2i16_format reg2i16_format; + struct reg2bstrd_format reg2bstrd_format; + struct reg3_format reg3_format; + struct reg3sa2_format reg3sa2_format; +}; + +#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) + +enum loongarch_gpr { + LOONGARCH_GPR_ZERO = 0, + LOONGARCH_GPR_RA = 1, + LOONGARCH_GPR_TP = 2, + LOONGARCH_GPR_SP = 3, + LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */ + LOONGARCH_GPR_A1, /* Reused as V1 for return value */ + LOONGARCH_GPR_A2, + LOONGARCH_GPR_A3, + LOONGARCH_GPR_A4, + LOONGARCH_GPR_A5, + LOONGARCH_GPR_A6, + LOONGARCH_GPR_A7, + LOONGARCH_GPR_T0 = 12, + LOONGARCH_GPR_T1, + LOONGARCH_GPR_T2, + LOONGARCH_GPR_T3, + LOONGARCH_GPR_T4, + LOONGARCH_GPR_T5, + LOONGARCH_GPR_T6, + LOONGARCH_GPR_T7, + LOONGARCH_GPR_T8, + LOONGARCH_GPR_FP = 22, + LOONGARCH_GPR_S0 = 23, + LOONGARCH_GPR_S1, + 
LOONGARCH_GPR_S2, + LOONGARCH_GPR_S3, + LOONGARCH_GPR_S4, + LOONGARCH_GPR_S5, + LOONGARCH_GPR_S6, + LOONGARCH_GPR_S7, + LOONGARCH_GPR_S8, + LOONGARCH_GPR_MAX +}; + +#define is_imm12_negative(val) is_imm_negative(val, 12) + +static inline bool is_imm_negative(unsigned long val, unsigned int bit) +{ + return val & (1UL << (bit - 1)); +} + +static inline bool is_branch_ins(union loongarch_instruction *ip) +{ + return ip->reg1i21_format.opcode >= beqz_op && + ip->reg1i21_format.opcode <= bgeu_op; +} + +static inline bool is_ra_save_ins(union loongarch_instruction *ip) +{ + /* st.d $ra, $sp, offset */ + return ip->reg2i12_format.opcode == std_op && + ip->reg2i12_format.rj == LOONGARCH_GPR_SP && + ip->reg2i12_format.rd == LOONGARCH_GPR_RA && + !is_imm12_negative(ip->reg2i12_format.immediate); +} + +static inline bool is_stack_alloc_ins(union loongarch_instruction *ip) +{ + /* addi.d $sp, $sp, -imm */ + return ip->reg2i12_format.opcode == addid_op && + ip->reg2i12_format.rj == LOONGARCH_GPR_SP && + ip->reg2i12_format.rd == LOONGARCH_GPR_SP && + is_imm12_negative(ip->reg2i12_format.immediate); +} + +u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); +u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest); + +static inline bool signed_imm_check(long val, unsigned int bit) +{ + return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1)); +} + +static inline bool unsigned_imm_check(unsigned long val, unsigned int bit) +{ + return val < (1UL << bit); +} + +#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + int offset) \ +{ \ + unsigned int immediate_l, immediate_h; \ + \ + immediate_l = offset & 0xffff; \ + offset >>= 16; \ + immediate_h = offset & 0x3ff; \ + \ + insn->reg0i26_format.opcode = OP; \ + insn->reg0i26_format.immediate_l = immediate_l; \ + insn->reg0i26_format.immediate_h = immediate_h; \ +} + +DEF_EMIT_REG0I26_FORMAT(b, b_op) + +#define DEF_EMIT_REG1I20_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, int imm) \ +{ \ + insn->reg1i20_format.opcode = OP; \ + insn->reg1i20_format.immediate = imm; \ + insn->reg1i20_format.rd = rd; \ +} + +DEF_EMIT_REG1I20_FORMAT(lu12iw, lu12iw_op) +DEF_EMIT_REG1I20_FORMAT(lu32id, lu32id_op) +DEF_EMIT_REG1I20_FORMAT(pcaddu18i, pcaddu18i_op) + +#define DEF_EMIT_REG2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj) \ +{ \ + insn->reg2_format.opcode = OP; \ + insn->reg2_format.rd = rd; \ + insn->reg2_format.rj = rj; \ +} + +DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op) +DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op) +DEF_EMIT_REG2_FORMAT(revbd, revbd_op) + +#define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i5_format.opcode = OP; \ + insn->reg2i5_format.immediate = imm; \ + insn->reg2i5_format.rd = rd; \ + insn->reg2i5_format.rj = rj; \ +} + +DEF_EMIT_REG2I5_FORMAT(slliw, slliw_op) +DEF_EMIT_REG2I5_FORMAT(srliw, srliw_op) +DEF_EMIT_REG2I5_FORMAT(sraiw, sraiw_op) + +#define DEF_EMIT_REG2I6_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i6_format.opcode = 
OP; \ + insn->reg2i6_format.immediate = imm; \ + insn->reg2i6_format.rd = rd; \ + insn->reg2i6_format.rj = rj; \ +} + +DEF_EMIT_REG2I6_FORMAT(sllid, sllid_op) +DEF_EMIT_REG2I6_FORMAT(srlid, srlid_op) +DEF_EMIT_REG2I6_FORMAT(sraid, sraid_op) + +#define DEF_EMIT_REG2I12_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i12_format.opcode = OP; \ + insn->reg2i12_format.immediate = imm; \ + insn->reg2i12_format.rd = rd; \ + insn->reg2i12_format.rj = rj; \ +} + +DEF_EMIT_REG2I12_FORMAT(addiw, addiw_op) +DEF_EMIT_REG2I12_FORMAT(addid, addid_op) +DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op) +DEF_EMIT_REG2I12_FORMAT(andi, andi_op) +DEF_EMIT_REG2I12_FORMAT(ori, ori_op) +DEF_EMIT_REG2I12_FORMAT(xori, xori_op) +DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op) +DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op) +DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op) +DEF_EMIT_REG2I12_FORMAT(ldd, ldd_op) +DEF_EMIT_REG2I12_FORMAT(stb, stb_op) +DEF_EMIT_REG2I12_FORMAT(sth, sth_op) +DEF_EMIT_REG2I12_FORMAT(stw, stw_op) +DEF_EMIT_REG2I12_FORMAT(std, std_op) + +#define DEF_EMIT_REG2I14_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i14_format.opcode = OP; \ + insn->reg2i14_format.immediate = imm; \ + insn->reg2i14_format.rd = rd; \ + insn->reg2i14_format.rj = rj; \ +} + +DEF_EMIT_REG2I14_FORMAT(llw, llw_op) +DEF_EMIT_REG2I14_FORMAT(scw, scw_op) +DEF_EMIT_REG2I14_FORMAT(lld, lld_op) +DEF_EMIT_REG2I14_FORMAT(scd, scd_op) +DEF_EMIT_REG2I14_FORMAT(ldptrw, ldptrw_op) +DEF_EMIT_REG2I14_FORMAT(stptrw, stptrw_op) +DEF_EMIT_REG2I14_FORMAT(ldptrd, ldptrd_op) +DEF_EMIT_REG2I14_FORMAT(stptrd, stptrd_op) + +#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rd, \ + int offset) \ +{ \ + insn->reg2i16_format.opcode = OP; \ + insn->reg2i16_format.immediate = offset; \ + insn->reg2i16_format.rj = rj; \ + insn->reg2i16_format.rd = rd; \ +} + +DEF_EMIT_REG2I16_FORMAT(beq, beq_op) +DEF_EMIT_REG2I16_FORMAT(bne, bne_op) +DEF_EMIT_REG2I16_FORMAT(blt, blt_op) +DEF_EMIT_REG2I16_FORMAT(bge, bge_op) +DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op) +DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op) +DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int msbd, \ + int lsbd) \ +{ \ + insn->reg2bstrd_format.opcode = OP; \ + insn->reg2bstrd_format.msbd = msbd; \ + insn->reg2bstrd_format.lsbd = lsbd; \ + insn->reg2bstrd_format.rj = rj; \ + insn->reg2bstrd_format.rd = rd; \ +} + +DEF_EMIT_REG2BSTRD_FORMAT(bstrpickd, bstrpickd_op) + +#define DEF_EMIT_REG3_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk) \ +{ \ + insn->reg3_format.opcode = OP; \ + insn->reg3_format.rd = rd; \ + insn->reg3_format.rj = rj; \ + insn->reg3_format.rk = rk; \ +} + +DEF_EMIT_REG3_FORMAT(addd, addd_op) +DEF_EMIT_REG3_FORMAT(subd, subd_op) +DEF_EMIT_REG3_FORMAT(muld, muld_op) +DEF_EMIT_REG3_FORMAT(divdu, divdu_op) +DEF_EMIT_REG3_FORMAT(moddu, moddu_op) +DEF_EMIT_REG3_FORMAT(and, and_op) +DEF_EMIT_REG3_FORMAT(or, or_op) +DEF_EMIT_REG3_FORMAT(xor, xor_op) +DEF_EMIT_REG3_FORMAT(sllw, sllw_op) 
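+ +/* + * A minimal usage sketch (hypothetical caller, not part of this header): + * the emit_* helpers above fill in a union loongarch_instruction in place, + * e.g. + * + * union loongarch_instruction insn; + * + * if (signed_imm_check(-16, 12)) + * emit_addid(&insn, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16); + * + * after which insn.word holds the 32-bit encoding of "addi.d $sp, $sp, -16". + */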
+DEF_EMIT_REG3_FORMAT(slld, slld_op) +DEF_EMIT_REG3_FORMAT(srlw, srlw_op) +DEF_EMIT_REG3_FORMAT(srld, srld_op) +DEF_EMIT_REG3_FORMAT(sraw, sraw_op) +DEF_EMIT_REG3_FORMAT(srad, srad_op) +DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op) +DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op) +DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op) +DEF_EMIT_REG3_FORMAT(ldxd, ldxd_op) +DEF_EMIT_REG3_FORMAT(stxb, stxb_op) +DEF_EMIT_REG3_FORMAT(stxh, stxh_op) +DEF_EMIT_REG3_FORMAT(stxw, stxw_op) +DEF_EMIT_REG3_FORMAT(stxd, stxd_op) +DEF_EMIT_REG3_FORMAT(amaddw, amaddw_op) +DEF_EMIT_REG3_FORMAT(amaddd, amaddd_op) +DEF_EMIT_REG3_FORMAT(amandw, amandw_op) +DEF_EMIT_REG3_FORMAT(amandd, amandd_op) +DEF_EMIT_REG3_FORMAT(amorw, amorw_op) +DEF_EMIT_REG3_FORMAT(amord, amord_op) +DEF_EMIT_REG3_FORMAT(amxorw, amxorw_op) +DEF_EMIT_REG3_FORMAT(amxord, amxord_op) +DEF_EMIT_REG3_FORMAT(amswapw, amswapw_op) +DEF_EMIT_REG3_FORMAT(amswapd, amswapd_op) + +#define DEF_EMIT_REG3SA2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk, \ + int imm) \ +{ \ + insn->reg3sa2_format.opcode = OP; \ + insn->reg3sa2_format.immediate = imm; \ + insn->reg3sa2_format.rd = rd; \ + insn->reg3sa2_format.rj = rj; \ + insn->reg3sa2_format.rk = rk; \ +} + +DEF_EMIT_REG3SA2_FORMAT(alsld, alsld_op) + +#endif /* _ASM_INST_H */ diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h new file mode 100644 index 000000000..402a7d9e3 --- /dev/null +++ b/arch/loongarch/include/asm/io.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#define ARCH_HAS_IOREMAP_WC + +#include <linux/kernel.h> +#include <linux/types.h> + +#include <asm/addrspace.h> +#include <asm/cpu.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/string.h> + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size); +extern void __init early_iounmap(void __iomem *addr, unsigned long size); + +#define early_memremap early_ioremap +#define early_memunmap early_iounmap + +#ifdef CONFIG_ARCH_IOREMAP + +static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, + unsigned long prot_val) +{ + if (prot_val & _CACHE_CC) + return (void __iomem *)(unsigned long)(CACHE_BASE + offset); + else + return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset); +} + +#define ioremap(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) + +#define iounmap(addr) ((void)(addr)) + +#endif + +/* + * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache(). + * They map bus memory into CPU space, the mapped memory is marked uncachable + * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and + * cachable (_CACHE_CC) respectively for CPU access. + * + * @offset: bus address of the memory + * @size: size of the resource to map + */ +#define ioremap_wc(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC)) + +#define ioremap_cache(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) + +#define mmiowb() asm volatile ("dbar 0" ::: "memory") + +/* + * String version of I/O memory access operations. 
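+ * + * A hedged usage sketch (phys_base, buf and len are placeholders, error + * handling omitted); drivers typically pair these with one of the + * ioremap() variants above: + * + * void __iomem *regs = ioremap_wc(phys_base, size); + * + * memcpy_toio(regs, buf, len); + * memset_io(regs + len, 0, size - len);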
+ */ +extern void __memset_io(volatile void __iomem *dst, int c, size_t count); +extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count); +extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count); +#define memset_io(c, v, l) __memset_io((c), (v), (l)) +#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l)) +#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l)) + +#include <asm-generic/io.h> + +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + +#endif /* _ASM_IO_H */ diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h new file mode 100644 index 000000000..5332b1433 --- /dev/null +++ b/arch/loongarch/include/asm/irq.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IRQ_H +#define _ASM_IRQ_H + +#include <linux/irqdomain.h> +#include <linux/irqreturn.h> + +#define IRQ_STACK_SIZE THREAD_SIZE +#define IRQ_STACK_START (IRQ_STACK_SIZE - 16) + +DECLARE_PER_CPU(unsigned long, irq_stack); + +/* + * The highest address on the IRQ stack contains a dummy frame which is + * structured as follows: + * + * top ------------ + * | task sp | <- irq_stack[cpu] + IRQ_STACK_START + * ------------ + * | | <- First frame of IRQ context + * ------------ + * + * task sp holds a copy of the task stack pointer where the struct pt_regs + * from exception entry can be found. + */ + +static inline bool on_irq_stack(int cpu, unsigned long sp) +{ + unsigned long low = per_cpu(irq_stack, cpu); + unsigned long high = low + IRQ_STACK_SIZE; + + return (low <= sp && sp <= high); +} + +void spurious_interrupt(void); + +#define NR_IRQS_LEGACY 16 + +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self); + +#define MAX_IO_PICS 2 +#define NR_IRQS (64 + (256 * MAX_IO_PICS)) + +struct acpi_vector_group { + int node; + int pci_segment; + struct irq_domain *parent; +}; +extern struct acpi_vector_group pch_group[MAX_IO_PICS]; +extern struct acpi_vector_group msi_group[MAX_IO_PICS]; + +#define CORES_PER_EIO_NODE 4 + +#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ +#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */ +#define LOONGSON_CPU_HT0_VEC 16 /* CPU HT0 irq vector base number */ +#define LOONGSON_CPU_HT1_VEC 24 /* CPU HT1 irq vector base number */ + +/* IRQ number definitions */ +#define LOONGSON_LPC_IRQ_BASE 0 +#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) + +#define LOONGSON_CPU_IRQ_BASE 16 +#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) + +#define LOONGSON_PCH_IRQ_BASE 64 +#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) +#define LOONGSON_PCH_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 64 - 1) + +#define LOONGSON_MSI_IRQ_BASE (LOONGSON_PCH_IRQ_BASE + 64) +#define LOONGSON_MSI_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1) + +#define GSI_MIN_LPC_IRQ LOONGSON_LPC_IRQ_BASE +#define GSI_MAX_LPC_IRQ (LOONGSON_LPC_IRQ_BASE + 16 - 1) +#define GSI_MIN_CPU_IRQ LOONGSON_CPU_IRQ_BASE +#define GSI_MAX_CPU_IRQ (LOONGSON_CPU_IRQ_BASE + 48 - 1) +#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE +#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1) + +struct acpi_madt_lio_pic; +struct acpi_madt_eio_pic; +struct acpi_madt_ht_pic; +struct acpi_madt_bio_pic; +struct acpi_madt_msi_pic; +struct 
acpi_madt_lpc_pic; + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); + +struct irq_domain *htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int find_pch_pic(u32 gsi); +struct fwnode_handle *get_pch_msi_handle(int pci_segment); + +extern struct acpi_madt_lio_pic *acpi_liointc; +extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +extern struct acpi_madt_ht_pic *acpi_htintc; +extern struct acpi_madt_lpc_pic *acpi_pchlpc; +extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +extern struct fwnode_handle *cpuintc_handle; +extern struct fwnode_handle *liointc_handle; +extern struct fwnode_handle *pch_lpc_handle; +extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; + +extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); + +#include <asm-generic/irq.h> + +#endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/irq_regs.h b/arch/loongarch/include/asm/irq_regs.h new file mode 100644 index 000000000..3d62d815b --- /dev/null +++ b/arch/loongarch/include/asm/irq_regs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_IRQ_REGS_H +#define __ASM_IRQ_REGS_H + +#define ARCH_HAS_OWN_IRQ_REGS + +#include <linux/thread_info.h> + +static inline struct pt_regs *get_irq_regs(void) +{ + return current_thread_info()->regs; +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = get_irq_regs(); + current_thread_info()->regs = new_regs; + + return old_regs; +} + +#endif /* __ASM_IRQ_REGS_H */ diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h new file mode 100644 index 000000000..319a8c616 --- /dev/null +++ b/arch/loongarch/include/asm/irqflags.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/stringify.h> +#include <asm/loongarch.h> + +static inline void arch_local_irq_enable(void) +{ + u32 flags = CSR_CRMD_IE; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline void arch_local_irq_disable(void) +{ + u32 flags = 0; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + u32 flags = 0; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" 
(LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline unsigned long arch_local_save_flags(void) +{ + u32 flags; + __asm__ __volatile__( + "csrrd %[val], %[reg]\n\t" + : [val] "=r" (flags) + : [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); + return flags; +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & CSR_CRMD_IE); +} + +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* #ifndef __ASSEMBLY__ */ + +#endif /* _ASM_IRQFLAGS_H */ diff --git a/arch/loongarch/include/asm/kdebug.h b/arch/loongarch/include/asm/kdebug.h new file mode 100644 index 000000000..d721b4b82 --- /dev/null +++ b/arch/loongarch/include/asm/kdebug.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_KDEBUG_H +#define _ASM_LOONGARCH_KDEBUG_H + +#include <linux/notifier.h> + +enum die_val { + DIE_OOPS = 1, + DIE_RI, + DIE_FP, + DIE_SIMD, + DIE_TRAP, + DIE_PAGE_FAULT, + DIE_BREAK, + DIE_SSTEPBP, + DIE_UPROBE, + DIE_UPROBE_XOL, +}; + +#endif /* _ASM_LOONGARCH_KDEBUG_H */ diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h new file mode 100644 index 000000000..cf95cd3eb --- /dev/null +++ b/arch/loongarch/include/asm/kexec.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kexec.h for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_KEXEC_H +#define _ASM_KEXEC_H + +#include <asm/stacktrace.h> +#include <asm/page.h> + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + /* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(*newregs)); + else + prepare_frametrace(newregs); +} + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long efi_boot; + unsigned long cmdline_ptr; + unsigned long systable_ptr; +}; + +typedef void (*do_kexec_t)(unsigned long efi_boot, + unsigned long cmdline_ptr, + unsigned long systable_ptr, + unsigned long start_addr, + unsigned long first_ind_entry); + +struct kimage; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; +extern void kexec_reboot(void); + +#ifdef CONFIG_SMP +extern atomic_t kexec_ready_to_reboot; +extern const unsigned char kexec_smp_wait[]; +#endif + +#endif /* !_ASM_KEXEC_H */ diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h new file mode 100644 index 000000000..81b0c4cfb --- /dev/null +++ b/arch/loongarch/include/asm/linkage.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 2 +#define __ALIGN_STR __stringify(__ALIGN) + +#define SYM_FUNC_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_NOALIGN(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, 
SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_LOCAL_NOALIGN(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_START_WEAK(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_WEAK_NOALIGN(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_END(name) \ + .cfi_endproc; \ + SYM_END(name, SYM_T_FUNC) + +#endif diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h new file mode 100644 index 000000000..65fbbae9f --- /dev/null +++ b/arch/loongarch/include/asm/local.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ARCH_LOONGARCH_LOCAL_H +#define _ARCH_LOONGARCH_LOCAL_H + +#include <linux/percpu.h> +#include <linux/bitops.h> +#include <linux/atomic.h> +#include <asm/cmpxchg.h> + +typedef struct { + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l, i) atomic_long_set(&(l)->a, (i)) + +#define local_add(i, l) atomic_long_add((i), (&(l)->a)) +#define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) + +/* + * Same as above, but return the result value + */ +static inline long local_add_return(long i, local_t *l) +{ + unsigned long result; + + __asm__ __volatile__( + " " __AMADD " %1, %2, %0 \n" + : "+ZB" (l->a.counter), "=&r" (result) + : "r" (i) + : "memory"); + result = result + i; + + return result; +} + +static inline long local_sub_return(long i, local_t *l) +{ + unsigned long result; + + __asm__ __volatile__( + " " __AMADD "%1, %2, %0 \n" + : "+ZB" (l->a.counter), "=&r" (result) + : "r" (-i) + : "memory"); + + result = result - i; + + return result; +} + +#define local_cmpxchg(l, o, n) \ + ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. + * Returns non-zero if @l was not @u, and zero otherwise. + */ +#define local_add_unless(l, a, u) \ +({ \ + long c, old; \ + c = local_read(l); \ + while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +#define local_dec_return(l) local_sub_return(1, (l)) +#define local_inc_return(l) local_add_return(1, (l)) + +/* + * local_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @l: pointer of type local_t + * + * Atomically subtracts @i from @l and returns + * true if the result is zero, or false for all + * other cases. + */ +#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) + +/* + * local_inc_and_test - increment and test + * @l: pointer of type local_t + * + * Atomically increments @l by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define local_inc_and_test(l) (local_inc_return(l) == 0) + +/* + * local_dec_and_test - decrement by 1 and test + * @l: pointer of type local_t + * + * Atomically decrements @l by 1 and + * returns true if the result is 0, or false for all other + * cases. 
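+ * + * An illustrative pattern (assumes a DEFINE_PER_CPU(local_t, nr_refs) + * counter and a do_cleanup() helper declared elsewhere): + * + * local_inc(this_cpu_ptr(&nr_refs)); + * ... + * if (local_dec_and_test(this_cpu_ptr(&nr_refs))) + * do_cleanup();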
+ */ +#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) + +/* + * local_add_negative - add and test if negative + * @l: pointer of type local_t + * @i: integer value to add + * + * Atomically adds @i to @l and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#define local_add_negative(i, l) (local_add_return(i, (l)) < 0) + +/* Use these for per-cpu local_t variables: on some archs they are + * much more efficient than these naive implementations. Note they take + * a variable, not an address. + */ + +#define __local_inc(l) ((l)->a.counter++) +#define __local_dec(l) ((l)->a.counter--) +#define __local_add(i, l) ((l)->a.counter += (i)) +#define __local_sub(i, l) ((l)->a.counter -= (i)) + +#endif /* _ARCH_LOONGARCH_LOCAL_H */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h new file mode 100644 index 000000000..3d15fa5be --- /dev/null +++ b/arch/loongarch/include/asm/loongarch.h @@ -0,0 +1,1495 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_H +#define _ASM_LOONGARCH_H + +#include <linux/bits.h> +#include <linux/linkage.h> +#include <linux/types.h> + +#ifndef __ASSEMBLY__ +#include <larchintrin.h> + +/* + * parse_r var, r - Helper assembler macro for parsing register names. + * + * This converts the register name in $n form provided in \r to the + * corresponding register number, which is assigned to the variable \var. It is + * needed to allow explicit encoding of instructions in inline assembly where + * registers are chosen by the compiler in $n form, allowing us to avoid using + * fixed register numbers. + * + * It also allows newer instructions (not implemented by the assembler) to be + * transparently implemented using assembler macros, instead of needing separate + * cases depending on toolchain support.
+ * + * Simple usage example: + * __asm__ __volatile__("parse_r addr, %0\n\t" + * "#invtlb op, 0, %0\n\t" + * ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)" + * : "=r" (status); + */ + +/* Match an individual register number and assign to \var */ +#define _IFC_REG(n) \ + ".ifc \\r, $r" #n "\n\t" \ + "\\var = " #n "\n\t" \ + ".endif\n\t" + +__asm__(".macro parse_r var r\n\t" + "\\var = -1\n\t" + _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) + _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) + _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) + _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) + _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) + _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) + _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) + _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) + ".iflt \\var\n\t" + ".error \"Unable to parse register name \\r\"\n\t" + ".endif\n\t" + ".endm"); + +#undef _IFC_REG + +/* CPUCFG */ +static inline u32 read_cpucfg(u32 reg) +{ + return __cpucfg(reg); +} + +#endif /* !__ASSEMBLY__ */ + +#ifdef __ASSEMBLY__ + +/* LoongArch Registers */ +#define REG_ZERO 0x0 +#define REG_RA 0x1 +#define REG_TP 0x2 +#define REG_SP 0x3 +#define REG_A0 0x4 /* Reused as V0 for return value */ +#define REG_A1 0x5 /* Reused as V1 for return value */ +#define REG_A2 0x6 +#define REG_A3 0x7 +#define REG_A4 0x8 +#define REG_A5 0x9 +#define REG_A6 0xa +#define REG_A7 0xb +#define REG_T0 0xc +#define REG_T1 0xd +#define REG_T2 0xe +#define REG_T3 0xf +#define REG_T4 0x10 +#define REG_T5 0x11 +#define REG_T6 0x12 +#define REG_T7 0x13 +#define REG_T8 0x14 +#define REG_U0 0x15 /* Kernel uses it as percpu base */ +#define REG_FP 0x16 +#define REG_S0 0x17 +#define REG_S1 0x18 +#define REG_S2 0x19 +#define REG_S3 0x1a +#define REG_S4 0x1b +#define REG_S5 0x1c +#define REG_S6 0x1d +#define REG_S7 0x1e +#define REG_S8 0x1f + +#endif /* __ASSEMBLY__ */ + +/* Bit fields for CPUCFG registers */ +#define LOONGARCH_CPUCFG0 0x0 +#define CPUCFG0_PRID GENMASK(31, 0) + +#define LOONGARCH_CPUCFG1 0x1 +#define CPUCFG1_ISGR32 BIT(0) +#define CPUCFG1_ISGR64 BIT(1) +#define CPUCFG1_PAGING BIT(2) +#define CPUCFG1_IOCSR BIT(3) +#define CPUCFG1_PABITS GENMASK(11, 4) +#define CPUCFG1_VABITS GENMASK(19, 12) +#define CPUCFG1_UAL BIT(20) +#define CPUCFG1_RI BIT(21) +#define CPUCFG1_EP BIT(22) +#define CPUCFG1_RPLV BIT(23) +#define CPUCFG1_HUGEPG BIT(24) +#define CPUCFG1_CRC32 BIT(25) +#define CPUCFG1_MSGINT BIT(26) + +#define LOONGARCH_CPUCFG2 0x2 +#define CPUCFG2_FP BIT(0) +#define CPUCFG2_FPSP BIT(1) +#define CPUCFG2_FPDP BIT(2) +#define CPUCFG2_FPVERS GENMASK(5, 3) +#define CPUCFG2_LSX BIT(6) +#define CPUCFG2_LASX BIT(7) +#define CPUCFG2_COMPLEX BIT(8) +#define CPUCFG2_CRYPTO BIT(9) +#define CPUCFG2_LVZP BIT(10) +#define CPUCFG2_LVZVER GENMASK(13, 11) +#define CPUCFG2_LLFTP BIT(14) +#define CPUCFG2_LLFTPREV GENMASK(17, 15) +#define CPUCFG2_X86BT BIT(18) +#define CPUCFG2_ARMBT BIT(19) +#define CPUCFG2_MIPSBT BIT(20) +#define CPUCFG2_LSPW BIT(21) +#define CPUCFG2_LAM BIT(22) + +#define LOONGARCH_CPUCFG3 0x3 +#define CPUCFG3_CCDMA BIT(0) +#define CPUCFG3_SFB BIT(1) +#define CPUCFG3_UCACC BIT(2) +#define CPUCFG3_LLEXC BIT(3) +#define CPUCFG3_SCDLY BIT(4) +#define CPUCFG3_LLDBAR BIT(5) +#define CPUCFG3_ITLBT BIT(6) +#define CPUCFG3_ICACHET BIT(7) +#define CPUCFG3_SPW_LVL GENMASK(10, 8) +#define CPUCFG3_SPW_HG_HF BIT(11) +#define CPUCFG3_RVA BIT(12) +#define CPUCFG3_RVAMAX GENMASK(16, 13) + +#define LOONGARCH_CPUCFG4 0x4 +#define CPUCFG4_CCFREQ GENMASK(31, 0) + +#define LOONGARCH_CPUCFG5 0x5 
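+ +/* + * Illustrative only: CPUCFG words are read with read_cpucfg() above and + * decoded with these field masks, e.g. (assuming <linux/bitfield.h> for + * FIELD_GET()): + * + * u32 cfg1 = read_cpucfg(LOONGARCH_CPUCFG1); + * bool has_ual = cfg1 & CPUCFG1_UAL; + * unsigned int palen = FIELD_GET(CPUCFG1_PABITS, cfg1); + */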
+#define CPUCFG5_CCMUL GENMASK(15, 0) +#define CPUCFG5_CCDIV GENMASK(31, 16) + +#define LOONGARCH_CPUCFG6 0x6 +#define CPUCFG6_PMP BIT(0) +#define CPUCFG6_PAMVER GENMASK(3, 1) +#define CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMBITS GENMASK(13, 8) +#define CPUCFG6_UPM BIT(14) + +#define LOONGARCH_CPUCFG16 0x10 +#define CPUCFG16_L1_IUPRE BIT(0) +#define CPUCFG16_L1_IUUNIFY BIT(1) +#define CPUCFG16_L1_DPRE BIT(2) +#define CPUCFG16_L2_IUPRE BIT(3) +#define CPUCFG16_L2_IUUNIFY BIT(4) +#define CPUCFG16_L2_IUPRIV BIT(5) +#define CPUCFG16_L2_IUINCL BIT(6) +#define CPUCFG16_L2_DPRE BIT(7) +#define CPUCFG16_L2_DPRIV BIT(8) +#define CPUCFG16_L2_DINCL BIT(9) +#define CPUCFG16_L3_IUPRE BIT(10) +#define CPUCFG16_L3_IUUNIFY BIT(11) +#define CPUCFG16_L3_IUPRIV BIT(12) +#define CPUCFG16_L3_IUINCL BIT(13) +#define CPUCFG16_L3_DPRE BIT(14) +#define CPUCFG16_L3_DPRIV BIT(15) +#define CPUCFG16_L3_DINCL BIT(16) + +#define LOONGARCH_CPUCFG17 0x11 +#define LOONGARCH_CPUCFG18 0x12 +#define LOONGARCH_CPUCFG19 0x13 +#define LOONGARCH_CPUCFG20 0x14 +#define CPUCFG_CACHE_WAYS_M GENMASK(15, 0) +#define CPUCFG_CACHE_SETS_M GENMASK(23, 16) +#define CPUCFG_CACHE_LSIZE_M GENMASK(30, 24) +#define CPUCFG_CACHE_WAYS 0 +#define CPUCFG_CACHE_SETS 16 +#define CPUCFG_CACHE_LSIZE 24 + +#define LOONGARCH_CPUCFG48 0x30 +#define CPUCFG48_MCSR_LCK BIT(0) +#define CPUCFG48_NAP_EN BIT(1) +#define CPUCFG48_VFPU_CG BIT(2) +#define CPUCFG48_RAM_CG BIT(3) + +#ifndef __ASSEMBLY__ + +/* CSR */ +static __always_inline u32 csr_read32(u32 reg) +{ + return __csrrd_w(reg); +} + +static __always_inline u64 csr_read64(u32 reg) +{ + return __csrrd_d(reg); +} + +static __always_inline void csr_write32(u32 val, u32 reg) +{ + __csrwr_w(val, reg); +} + +static __always_inline void csr_write64(u64 val, u32 reg) +{ + __csrwr_d(val, reg); +} + +static __always_inline u32 csr_xchg32(u32 val, u32 mask, u32 reg) +{ + return __csrxchg_w(val, mask, reg); +} + +static __always_inline u64 csr_xchg64(u64 val, u64 mask, u32 reg) +{ + return __csrxchg_d(val, mask, reg); +} + +/* IOCSR */ +static __always_inline u32 iocsr_read32(u32 reg) +{ + return __iocsrrd_w(reg); +} + +static __always_inline u64 iocsr_read64(u32 reg) +{ + return __iocsrrd_d(reg); +} + +static __always_inline void iocsr_write32(u32 val, u32 reg) +{ + __iocsrwr_w(val, reg); +} + +static __always_inline void iocsr_write64(u64 val, u32 reg) +{ + __iocsrwr_d(val, reg); +} + +#endif /* !__ASSEMBLY__ */ + +/* CSR register number */ + +/* Basic CSR registers */ +#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */ +#define CSR_CRMD_WE_SHIFT 9 +#define CSR_CRMD_WE (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT) +#define CSR_CRMD_DACM_SHIFT 7 +#define CSR_CRMD_DACM_WIDTH 2 +#define CSR_CRMD_DACM (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT) +#define CSR_CRMD_DACF_SHIFT 5 +#define CSR_CRMD_DACF_WIDTH 2 +#define CSR_CRMD_DACF (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT) +#define CSR_CRMD_PG_SHIFT 4 +#define CSR_CRMD_PG (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT) +#define CSR_CRMD_DA_SHIFT 3 +#define CSR_CRMD_DA (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT) +#define CSR_CRMD_IE_SHIFT 2 +#define CSR_CRMD_IE (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT) +#define CSR_CRMD_PLV_SHIFT 0 +#define CSR_CRMD_PLV_WIDTH 2 +#define CSR_CRMD_PLV (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT) + +#define PLV_KERN 0 +#define PLV_USER 3 +#define PLV_MASK 0x3 + +#define LOONGARCH_CSR_PRMD 0x1 /* Prev-exception mode info */ +#define CSR_PRMD_PWE_SHIFT 3 +#define CSR_PRMD_PWE (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT) +#define CSR_PRMD_PIE_SHIFT 2 +#define CSR_PRMD_PIE (_ULCAST_(0x1) 
<< CSR_PRMD_PIE_SHIFT) +#define CSR_PRMD_PPLV_SHIFT 0 +#define CSR_PRMD_PPLV_WIDTH 2 +#define CSR_PRMD_PPLV (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT) + +#define LOONGARCH_CSR_EUEN 0x2 /* Extended unit enable */ +#define CSR_EUEN_LBTEN_SHIFT 3 +#define CSR_EUEN_LBTEN (_ULCAST_(0x1) << CSR_EUEN_LBTEN_SHIFT) +#define CSR_EUEN_LASXEN_SHIFT 2 +#define CSR_EUEN_LASXEN (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT) +#define CSR_EUEN_LSXEN_SHIFT 1 +#define CSR_EUEN_LSXEN (_ULCAST_(0x1) << CSR_EUEN_LSXEN_SHIFT) +#define CSR_EUEN_FPEN_SHIFT 0 +#define CSR_EUEN_FPEN (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT) + +#define LOONGARCH_CSR_MISC 0x3 /* Misc config */ + +#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ +#define CSR_ECFG_VS_SHIFT 16 +#define CSR_ECFG_VS_WIDTH 3 +#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT) +#define CSR_ECFG_IM_SHIFT 0 +#define CSR_ECFG_IM_WIDTH 13 +#define CSR_ECFG_IM (_ULCAST_(0x1fff) << CSR_ECFG_IM_SHIFT) + +#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */ +#define CSR_ESTAT_ESUBCODE_SHIFT 22 +#define CSR_ESTAT_ESUBCODE_WIDTH 9 +#define CSR_ESTAT_ESUBCODE (_ULCAST_(0x1ff) << CSR_ESTAT_ESUBCODE_SHIFT) +#define CSR_ESTAT_EXC_SHIFT 16 +#define CSR_ESTAT_EXC_WIDTH 6 +#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) +#define CSR_ESTAT_IS_SHIFT 0 +#define CSR_ESTAT_IS_WIDTH 15 +#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT) + +#define LOONGARCH_CSR_ERA 0x6 /* ERA */ + +#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */ + +#define LOONGARCH_CSR_BADI 0x8 /* Bad instruction */ + +#define LOONGARCH_CSR_EENTRY 0xc /* Exception entry */ + +/* TLB related CSR registers */ +#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize, NP */ +#define CSR_TLBIDX_EHINV_SHIFT 31 +#define CSR_TLBIDX_EHINV (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT) +#define CSR_TLBIDX_PS_SHIFT 24 +#define CSR_TLBIDX_PS_WIDTH 6 +#define CSR_TLBIDX_PS (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT) +#define CSR_TLBIDX_IDX_SHIFT 0 +#define CSR_TLBIDX_IDX_WIDTH 12 +#define CSR_TLBIDX_IDX (_ULCAST_(0xfff) << CSR_TLBIDX_IDX_SHIFT) +#define CSR_TLBIDX_SIZEM 0x3f000000 +#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT +#define CSR_TLBIDX_IDXM 0xfff +#define CSR_INVALID_ENTRY(e) (CSR_TLBIDX_EHINV | e) + +#define LOONGARCH_CSR_TLBEHI 0x11 /* TLB EntryHi */ + +#define LOONGARCH_CSR_TLBELO0 0x12 /* TLB EntryLo0 */ +#define CSR_TLBLO0_RPLV_SHIFT 63 +#define CSR_TLBLO0_RPLV (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT) +#define CSR_TLBLO0_NX_SHIFT 62 +#define CSR_TLBLO0_NX (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT) +#define CSR_TLBLO0_NR_SHIFT 61 +#define CSR_TLBLO0_NR (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT) +#define CSR_TLBLO0_PFN_SHIFT 12 +#define CSR_TLBLO0_PFN_WIDTH 36 +#define CSR_TLBLO0_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO0_PFN_SHIFT) +#define CSR_TLBLO0_GLOBAL_SHIFT 6 +#define CSR_TLBLO0_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO0_GLOBAL_SHIFT) +#define CSR_TLBLO0_CCA_SHIFT 4 +#define CSR_TLBLO0_CCA_WIDTH 2 +#define CSR_TLBLO0_CCA (_ULCAST_(0x3) << CSR_TLBLO0_CCA_SHIFT) +#define CSR_TLBLO0_PLV_SHIFT 2 +#define CSR_TLBLO0_PLV_WIDTH 2 +#define CSR_TLBLO0_PLV (_ULCAST_(0x3) << CSR_TLBLO0_PLV_SHIFT) +#define CSR_TLBLO0_WE_SHIFT 1 +#define CSR_TLBLO0_WE (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT) +#define CSR_TLBLO0_V_SHIFT 0 +#define CSR_TLBLO0_V (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT) + +#define LOONGARCH_CSR_TLBELO1 0x13 /* TLB EntryLo1 */ +#define CSR_TLBLO1_RPLV_SHIFT 63 +#define CSR_TLBLO1_RPLV (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT) +#define CSR_TLBLO1_NX_SHIFT 62 +#define CSR_TLBLO1_NX (_ULCAST_(0x1) << 
CSR_TLBLO1_NX_SHIFT) +#define CSR_TLBLO1_NR_SHIFT 61 +#define CSR_TLBLO1_NR (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT) +#define CSR_TLBLO1_PFN_SHIFT 12 +#define CSR_TLBLO1_PFN_WIDTH 36 +#define CSR_TLBLO1_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO1_PFN_SHIFT) +#define CSR_TLBLO1_GLOBAL_SHIFT 6 +#define CSR_TLBLO1_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO1_GLOBAL_SHIFT) +#define CSR_TLBLO1_CCA_SHIFT 4 +#define CSR_TLBLO1_CCA_WIDTH 2 +#define CSR_TLBLO1_CCA (_ULCAST_(0x3) << CSR_TLBLO1_CCA_SHIFT) +#define CSR_TLBLO1_PLV_SHIFT 2 +#define CSR_TLBLO1_PLV_WIDTH 2 +#define CSR_TLBLO1_PLV (_ULCAST_(0x3) << CSR_TLBLO1_PLV_SHIFT) +#define CSR_TLBLO1_WE_SHIFT 1 +#define CSR_TLBLO1_WE (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT) +#define CSR_TLBLO1_V_SHIFT 0 +#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT) + +#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */ +#define CSR_GTLBC_RID_SHIFT 16 +#define CSR_GTLBC_RID_WIDTH 8 +#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT) +#define CSR_GTLBC_TOTI_SHIFT 13 +#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT) +#define CSR_GTLBC_USERID_SHIFT 12 +#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT) +#define CSR_GTLBC_GMTLBSZ_SHIFT 0 +#define CSR_GTLBC_GMTLBSZ_WIDTH 6 +#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT) + +#define LOONGARCH_CSR_TRGP 0x16 /* TLBR read guest info */ +#define CSR_TRGP_RID_SHIFT 16 +#define CSR_TRGP_RID_WIDTH 8 +#define CSR_TRGP_RID (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT) +#define CSR_TRGP_GTLB_SHIFT 0 +#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT) + +#define LOONGARCH_CSR_ASID 0x18 /* ASID */ +#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */ +#define CSR_ASID_BIT_WIDTH 8 +#define CSR_ASID_BIT (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT) +#define CSR_ASID_ASID_SHIFT 0 +#define CSR_ASID_ASID_WIDTH 10 +#define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT) + +#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[47] = 0 */ + +#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[47] = 1 */ + +#define LOONGARCH_CSR_PGD 0x1b /* Page table base */ + +#define LOONGARCH_CSR_PWCTL0 0x1c /* PWCtl0 */ +#define CSR_PWCTL0_PTEW_SHIFT 30 +#define CSR_PWCTL0_PTEW_WIDTH 2 +#define CSR_PWCTL0_PTEW (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT) +#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25 +#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5 +#define CSR_PWCTL0_DIR1WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1WIDTH_SHIFT) +#define CSR_PWCTL0_DIR1BASE_SHIFT 20 +#define CSR_PWCTL0_DIR1BASE_WIDTH 5 +#define CSR_PWCTL0_DIR1BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1BASE_SHIFT) +#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15 +#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5 +#define CSR_PWCTL0_DIR0WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0WIDTH_SHIFT) +#define CSR_PWCTL0_DIR0BASE_SHIFT 10 +#define CSR_PWCTL0_DIR0BASE_WIDTH 5 +#define CSR_PWCTL0_DIR0BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0BASE_SHIFT) +#define CSR_PWCTL0_PTWIDTH_SHIFT 5 +#define CSR_PWCTL0_PTWIDTH_WIDTH 5 +#define CSR_PWCTL0_PTWIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_PTWIDTH_SHIFT) +#define CSR_PWCTL0_PTBASE_SHIFT 0 +#define CSR_PWCTL0_PTBASE_WIDTH 5 +#define CSR_PWCTL0_PTBASE (_ULCAST_(0x1f) << CSR_PWCTL0_PTBASE_SHIFT) + +#define LOONGARCH_CSR_PWCTL1 0x1d /* PWCtl1 */ +#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18 +#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5 +#define CSR_PWCTL1_DIR3WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3WIDTH_SHIFT) +#define CSR_PWCTL1_DIR3BASE_SHIFT 12 +#define CSR_PWCTL1_DIR3BASE_WIDTH 5 +#define CSR_PWCTL1_DIR3BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR3BASE_SHIFT) 
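(Editorial aside; the remaining PWCTL1 fields continue directly below.) Together, the PWCTL0/PWCTL1 fields describe the in-memory page-table layout to the page walker: each level gets a starting bit position within the virtual address (the *BASE fields) and an index width (the *WIDTH fields). As a hedged sketch only — one legal encoding for a three-level, 16KB-page layout, not the kernel's actual setup code, and with an invented helper name — composing the registers with the csr_write64() accessor defined earlier in this header might look like:

/* Sketch: three-level layout, 16KB pages, 8-byte entries, so each
 * level indexes 11 bits. PTE level covers VA[24:14], DIR0 covers
 * VA[35:25], DIR1 covers VA[46:36]; deeper levels stay unused. */
static inline void pwctl_sketch(void)
{
	csr_write64((14UL << CSR_PWCTL0_PTBASE_SHIFT) |
		    (11UL << CSR_PWCTL0_PTWIDTH_SHIFT) |
		    (25UL << CSR_PWCTL0_DIR0BASE_SHIFT) |
		    (11UL << CSR_PWCTL0_DIR0WIDTH_SHIFT) |
		    (36UL << CSR_PWCTL0_DIR1BASE_SHIFT) |
		    (11UL << CSR_PWCTL0_DIR1WIDTH_SHIFT),
		    LOONGARCH_CSR_PWCTL0);
	csr_write64(0, LOONGARCH_CSR_PWCTL1);	/* DIR2/DIR3 levels unused here */
}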
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6 +#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5 +#define CSR_PWCTL1_DIR2WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2WIDTH_SHIFT) +#define CSR_PWCTL1_DIR2BASE_SHIFT 0 +#define CSR_PWCTL1_DIR2BASE_WIDTH 5 +#define CSR_PWCTL1_DIR2BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR2BASE_SHIFT) + +#define LOONGARCH_CSR_STLBPGSIZE 0x1e +#define CSR_STLBPGSIZE_PS_WIDTH 6 +#define CSR_STLBPGSIZE_PS (_ULCAST_(0x3f)) + +#define LOONGARCH_CSR_RVACFG 0x1f +#define CSR_RVACFG_RDVA_WIDTH 4 +#define CSR_RVACFG_RDVA (_ULCAST_(0xf)) + +/* Config CSR registers */ +#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */ +#define CSR_CPUID_COREID_WIDTH 9 +#define CSR_CPUID_COREID _ULCAST_(0x1ff) + +#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */ +#define CSR_CONF1_VSMAX_SHIFT 12 +#define CSR_CONF1_VSMAX_WIDTH 3 +#define CSR_CONF1_VSMAX (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT) +#define CSR_CONF1_TMRBITS_SHIFT 4 +#define CSR_CONF1_TMRBITS_WIDTH 8 +#define CSR_CONF1_TMRBITS (_ULCAST_(0xff) << CSR_CONF1_TMRBITS_SHIFT) +#define CSR_CONF1_KSNUM_WIDTH 4 +#define CSR_CONF1_KSNUM _ULCAST_(0xf) + +#define LOONGARCH_CSR_PRCFG2 0x22 /* Config2 */ +#define CSR_CONF2_PGMASK_SUPP 0x3ffff000 + +#define LOONGARCH_CSR_PRCFG3 0x23 /* Config3 */ +#define CSR_CONF3_STLBIDX_SHIFT 20 +#define CSR_CONF3_STLBIDX_WIDTH 6 +#define CSR_CONF3_STLBIDX (_ULCAST_(0x3f) << CSR_CONF3_STLBIDX_SHIFT) +#define CSR_CONF3_STLBWAYS_SHIFT 12 +#define CSR_CONF3_STLBWAYS_WIDTH 8 +#define CSR_CONF3_STLBWAYS (_ULCAST_(0xff) << CSR_CONF3_STLBWAYS_SHIFT) +#define CSR_CONF3_MTLBSIZE_SHIFT 4 +#define CSR_CONF3_MTLBSIZE_WIDTH 8 +#define CSR_CONF3_MTLBSIZE (_ULCAST_(0xff) << CSR_CONF3_MTLBSIZE_SHIFT) +#define CSR_CONF3_TLBTYPE_SHIFT 0 +#define CSR_CONF3_TLBTYPE_WIDTH 4 +#define CSR_CONF3_TLBTYPE (_ULCAST_(0xf) << CSR_CONF3_TLBTYPE_SHIFT) + +/* KSave registers */ +#define LOONGARCH_CSR_KS0 0x30 +#define LOONGARCH_CSR_KS1 0x31 +#define LOONGARCH_CSR_KS2 0x32 +#define LOONGARCH_CSR_KS3 0x33 +#define LOONGARCH_CSR_KS4 0x34 +#define LOONGARCH_CSR_KS5 0x35 +#define LOONGARCH_CSR_KS6 0x36 +#define LOONGARCH_CSR_KS7 0x37 +#define LOONGARCH_CSR_KS8 0x38 + +/* Exception allocated KS0, KS1 and KS2 statically */ +#define EXCEPTION_KS0 LOONGARCH_CSR_KS0 +#define EXCEPTION_KS1 LOONGARCH_CSR_KS1 +#define EXCEPTION_KS2 LOONGARCH_CSR_KS2 +#define EXC_KSAVE_MASK (1 << 0 | 1 << 1 | 1 << 2) + +/* Percpu-data base allocated KS3 statically */ +#define PERCPU_BASE_KS LOONGARCH_CSR_KS3 +#define PERCPU_KSAVE_MASK (1 << 3) + +/* KVM allocated KS4 and KS5 statically */ +#define KVM_VCPU_KS LOONGARCH_CSR_KS4 +#define KVM_TEMP_KS LOONGARCH_CSR_KS5 +#define KVM_KSAVE_MASK (1 << 4 | 1 << 5) + +/* Timer registers */ +#define LOONGARCH_CSR_TMID 0x40 /* Timer ID */ + +#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */ +#define CSR_TCFG_VAL_SHIFT 2 +#define CSR_TCFG_VAL_WIDTH 48 +#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT) +#define CSR_TCFG_PERIOD_SHIFT 1 +#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT) +#define CSR_TCFG_EN (_ULCAST_(0x1)) + +#define LOONGARCH_CSR_TVAL 0x42 /* Timer value */ + +#define LOONGARCH_CSR_CNTC 0x43 /* Timer offset */ + +#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */ +#define CSR_TINTCLR_TI_SHIFT 0 +#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT) + +/* Guest registers */ +#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */ +#define CSR_GSTAT_GID_SHIFT 16 +#define CSR_GSTAT_GID_WIDTH 8 +#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT) +#define CSR_GSTAT_GIDBIT_SHIFT 4 +#define 
CSR_GSTAT_GIDBIT_WIDTH 6 +#define CSR_GSTAT_GIDBIT (_ULCAST_(0x3f) << CSR_GSTAT_GIDBIT_SHIFT) +#define CSR_GSTAT_PVM_SHIFT 1 +#define CSR_GSTAT_PVM (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT) +#define CSR_GSTAT_VM_SHIFT 0 +#define CSR_GSTAT_VM (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT) + +#define LOONGARCH_CSR_GCFG 0x51 /* Guest config */ +#define CSR_GCFG_GPERF_SHIFT 24 +#define CSR_GCFG_GPERF_WIDTH 3 +#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT) +#define CSR_GCFG_GCI_SHIFT 20 +#define CSR_GCFG_GCI_WIDTH 2 +#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_ALL (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_HIT (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_SECURE (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCIP_SHIFT 16 +#define CSR_GCFG_GCIP (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_ALL (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_HIT (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 1)) +#define CSR_GCFG_GCIP_SECURE (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 2)) +#define CSR_GCFG_TORU_SHIFT 15 +#define CSR_GCFG_TORU (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT) +#define CSR_GCFG_TORUP_SHIFT 14 +#define CSR_GCFG_TORUP (_ULCAST_(0x1) << CSR_GCFG_TORUP_SHIFT) +#define CSR_GCFG_TOP_SHIFT 13 +#define CSR_GCFG_TOP (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT) +#define CSR_GCFG_TOPP_SHIFT 12 +#define CSR_GCFG_TOPP (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT) +#define CSR_GCFG_TOE_SHIFT 11 +#define CSR_GCFG_TOE (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT) +#define CSR_GCFG_TOEP_SHIFT 10 +#define CSR_GCFG_TOEP (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT) +#define CSR_GCFG_TIT_SHIFT 9 +#define CSR_GCFG_TIT (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT) +#define CSR_GCFG_TITP_SHIFT 8 +#define CSR_GCFG_TITP (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT) +#define CSR_GCFG_SIT_SHIFT 7 +#define CSR_GCFG_SIT (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT) +#define CSR_GCFG_SITP_SHIFT 6 +#define CSR_GCFG_SITP (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT) +#define CSR_GCFG_MATC_SHITF 4 +#define CSR_GCFG_MATC_WIDTH 2 +#define CSR_GCFG_MATC_MASK (_ULCAST_(0x3) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) + +#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ +#define CSR_GINTC_HC_SHIFT 16 +#define CSR_GINTC_HC_WIDTH 8 +#define CSR_GINTC_HC (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT) +#define CSR_GINTC_PIP_SHIFT 8 +#define CSR_GINTC_PIP_WIDTH 8 +#define CSR_GINTC_PIP (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT) +#define CSR_GINTC_VIP_SHIFT 0 +#define CSR_GINTC_VIP_WIDTH 8 +#define CSR_GINTC_VIP (_ULCAST_(0xff)) + +#define LOONGARCH_CSR_GCNTC 0x53 /* Guest timer offset */ + +/* LLBCTL register */ +#define LOONGARCH_CSR_LLBCTL 0x60 /* LLBit control */ +#define CSR_LLBCTL_ROLLB_SHIFT 0 +#define CSR_LLBCTL_ROLLB (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT) +#define CSR_LLBCTL_WCLLB_SHIFT 1 +#define CSR_LLBCTL_WCLLB (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT) +#define CSR_LLBCTL_KLO_SHIFT 2 +#define CSR_LLBCTL_KLO (_ULCAST_(1) << CSR_LLBCTL_KLO_SHIFT) + +/* Implement dependent */ +#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */ +#define CSR_MISPEC_SHIFT 20 +#define CSR_MISPEC_WIDTH 8 +#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT) +#define CSR_SSEN_SHIFT 18 +#define CSR_SSEN (_ULCAST_(1) << CSR_SSEN_SHIFT) +#define CSR_SCRAND_SHIFT 17 +#define CSR_SCRAND (_ULCAST_(1) << CSR_SCRAND_SHIFT) 
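(Editorial aside; the remaining IMPCTL1 bits continue directly below.) Every multi-bit field in this header follows the same convention: a _SHIFT constant, usually a _WIDTH, and a pre-shifted mask. A read is a mask-and-shift; a read-modify-write can go through the csr_xchg*() helpers defined earlier, which replace only the masked bits and leave the rest of the register untouched. A minimal sketch against the CSR.GSTAT GID field above (helper names are invented for illustration):

/* Sketch: fetch the current guest ID from CSR.GSTAT. */
static inline unsigned int gstat_gid_sketch(void)
{
	return (csr_read64(LOONGARCH_CSR_GSTAT) & CSR_GSTAT_GID) >> CSR_GSTAT_GID_SHIFT;
}

/* Sketch: store a new guest ID without disturbing other GSTAT bits. */
static inline void gstat_set_gid_sketch(unsigned int gid)
{
	csr_xchg64((u64)gid << CSR_GSTAT_GID_SHIFT, CSR_GSTAT_GID, LOONGARCH_CSR_GSTAT);
}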
+#define CSR_LLEXCL_SHIFT 16 +#define CSR_LLEXCL (_ULCAST_(1) << CSR_LLEXCL_SHIFT) +#define CSR_DISVC_SHIFT 15 +#define CSR_DISVC (_ULCAST_(1) << CSR_DISVC_SHIFT) +#define CSR_VCLRU_SHIFT 14 +#define CSR_VCLRU (_ULCAST_(1) << CSR_VCLRU_SHIFT) +#define CSR_DCLRU_SHIFT 13 +#define CSR_DCLRU (_ULCAST_(1) << CSR_DCLRU_SHIFT) +#define CSR_FASTLDQ_SHIFT 12 +#define CSR_FASTLDQ (_ULCAST_(1) << CSR_FASTLDQ_SHIFT) +#define CSR_USERCAC_SHIFT 11 +#define CSR_USERCAC (_ULCAST_(1) << CSR_USERCAC_SHIFT) +#define CSR_ANTI_MISPEC_SHIFT 10 +#define CSR_ANTI_MISPEC (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT) +#define CSR_AUTO_FLUSHSFB_SHIFT 9 +#define CSR_AUTO_FLUSHSFB (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT) +#define CSR_STFILL_SHIFT 8 +#define CSR_STFILL (_ULCAST_(1) << CSR_STFILL_SHIFT) +#define CSR_LIFEP_SHIFT 7 +#define CSR_LIFEP (_ULCAST_(1) << CSR_LIFEP_SHIFT) +#define CSR_LLSYNC_SHIFT 6 +#define CSR_LLSYNC (_ULCAST_(1) << CSR_LLSYNC_SHIFT) +#define CSR_BRBTDIS_SHIFT 5 +#define CSR_BRBTDIS (_ULCAST_(1) << CSR_BRBTDIS_SHIFT) +#define CSR_RASDIS_SHIFT 4 +#define CSR_RASDIS (_ULCAST_(1) << CSR_RASDIS_SHIFT) +#define CSR_STPRE_SHIFT 2 +#define CSR_STPRE_WIDTH 2 +#define CSR_STPRE (_ULCAST_(3) << CSR_STPRE_SHIFT) +#define CSR_INSTPRE_SHIFT 1 +#define CSR_INSTPRE (_ULCAST_(1) << CSR_INSTPRE_SHIFT) +#define CSR_DATAPRE_SHIFT 0 +#define CSR_DATAPRE (_ULCAST_(1) << CSR_DATAPRE_SHIFT) + +#define LOONGARCH_CSR_IMPCTL2 0x81 /* Loongson config2 */ +#define CSR_FLUSH_MTLB_SHIFT 0 +#define CSR_FLUSH_MTLB (_ULCAST_(1) << CSR_FLUSH_MTLB_SHIFT) +#define CSR_FLUSH_STLB_SHIFT 1 +#define CSR_FLUSH_STLB (_ULCAST_(1) << CSR_FLUSH_STLB_SHIFT) +#define CSR_FLUSH_DTLB_SHIFT 2 +#define CSR_FLUSH_DTLB (_ULCAST_(1) << CSR_FLUSH_DTLB_SHIFT) +#define CSR_FLUSH_ITLB_SHIFT 3 +#define CSR_FLUSH_ITLB (_ULCAST_(1) << CSR_FLUSH_ITLB_SHIFT) +#define CSR_FLUSH_BTAC_SHIFT 4 +#define CSR_FLUSH_BTAC (_ULCAST_(1) << CSR_FLUSH_BTAC_SHIFT) + +#define LOONGARCH_CSR_GNMI 0x82 + +/* TLB Refill registers */ +#define LOONGARCH_CSR_TLBRENTRY 0x88 /* TLB refill exception entry */ +#define LOONGARCH_CSR_TLBRBADV 0x89 /* TLB refill badvaddr */ +#define LOONGARCH_CSR_TLBRERA 0x8a /* TLB refill ERA */ +#define LOONGARCH_CSR_TLBRSAVE 0x8b /* KSave for TLB refill exception */ +#define LOONGARCH_CSR_TLBRELO0 0x8c /* TLB refill entrylo0 */ +#define LOONGARCH_CSR_TLBRELO1 0x8d /* TLB refill entrylo1 */ +#define LOONGARCH_CSR_TLBREHI 0x8e /* TLB refill entryhi */ +#define CSR_TLBREHI_PS_SHIFT 0 +#define CSR_TLBREHI_PS (_ULCAST_(0x3f) << CSR_TLBREHI_PS_SHIFT) +#define LOONGARCH_CSR_TLBRPRMD 0x8f /* TLB refill mode info */ + +/* Machine Error registers */ +#define LOONGARCH_CSR_MERRCTL 0x90 /* MERRCTL */ +#define LOONGARCH_CSR_MERRINFO1 0x91 /* MError info1 */ +#define LOONGARCH_CSR_MERRINFO2 0x92 /* MError info2 */ +#define LOONGARCH_CSR_MERRENTRY 0x93 /* MError exception entry */ +#define LOONGARCH_CSR_MERRERA 0x94 /* MError exception ERA */ +#define LOONGARCH_CSR_MERRSAVE 0x95 /* KSave for machine error exception */ + +#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ + +#define LOONGARCH_CSR_PRID 0xc0 + +/* Shadow MCSR : 0xc0 ~ 0xff */ +#define LOONGARCH_CSR_MCSR0 0xc0 /* CPUCFG0 and CPUCFG1 */ +#define MCSR0_INT_IMPL_SHIFT 58 +#define MCSR0_INT_IMPL 0 +#define MCSR0_IOCSR_BRD_SHIFT 57 +#define MCSR0_IOCSR_BRD (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT) +#define MCSR0_HUGEPG_SHIFT 56 +#define MCSR0_HUGEPG (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT) +#define MCSR0_RPLMTLB_SHIFT 55 +#define MCSR0_RPLMTLB (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT) +#define MCSR0_EP_SHIFT 54 +#define 
MCSR0_EP (_ULCAST_(1) << MCSR0_EP_SHIFT) +#define MCSR0_RI_SHIFT 53 +#define MCSR0_RI (_ULCAST_(1) << MCSR0_RI_SHIFT) +#define MCSR0_UAL_SHIFT 52 +#define MCSR0_UAL (_ULCAST_(1) << MCSR0_UAL_SHIFT) +#define MCSR0_VABIT_SHIFT 44 +#define MCSR0_VABIT_WIDTH 8 +#define MCSR0_VABIT (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT) +#define VABIT_DEFAULT 0x2f +#define MCSR0_PABIT_SHIFT 36 +#define MCSR0_PABIT_WIDTH 8 +#define MCSR0_PABIT (_ULCAST_(0xff) << MCSR0_PABIT_SHIFT) +#define PABIT_DEFAULT 0x2f +#define MCSR0_IOCSR_SHIFT 35 +#define MCSR0_IOCSR (_ULCAST_(1) << MCSR0_IOCSR_SHIFT) +#define MCSR0_PAGING_SHIFT 34 +#define MCSR0_PAGING (_ULCAST_(1) << MCSR0_PAGING_SHIFT) +#define MCSR0_GR64_SHIFT 33 +#define MCSR0_GR64 (_ULCAST_(1) << MCSR0_GR64_SHIFT) +#define GR64_DEFAULT 1 +#define MCSR0_GR32_SHIFT 32 +#define MCSR0_GR32 (_ULCAST_(1) << MCSR0_GR32_SHIFT) +#define GR32_DEFAULT 0 +#define MCSR0_PRID_WIDTH 32 +#define MCSR0_PRID 0x14C010 + +#define LOONGARCH_CSR_MCSR1 0xc1 /* CPUCFG2 and CPUCFG3 */ +#define MCSR1_HPFOLD_SHIFT 43 +#define MCSR1_HPFOLD (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT) +#define MCSR1_SPW_LVL_SHIFT 40 +#define MCSR1_SPW_LVL_WIDTH 3 +#define MCSR1_SPW_LVL (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT) +#define MCSR1_ICACHET_SHIFT 39 +#define MCSR1_ICACHET (_ULCAST_(1) << MCSR1_ICACHET_SHIFT) +#define MCSR1_ITLBT_SHIFT 38 +#define MCSR1_ITLBT (_ULCAST_(1) << MCSR1_ITLBT_SHIFT) +#define MCSR1_LLDBAR_SHIFT 37 +#define MCSR1_LLDBAR (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT) +#define MCSR1_SCDLY_SHIFT 36 +#define MCSR1_SCDLY (_ULCAST_(1) << MCSR1_SCDLY_SHIFT) +#define MCSR1_LLEXC_SHIFT 35 +#define MCSR1_LLEXC (_ULCAST_(1) << MCSR1_LLEXC_SHIFT) +#define MCSR1_UCACC_SHIFT 34 +#define MCSR1_UCACC (_ULCAST_(1) << MCSR1_UCACC_SHIFT) +#define MCSR1_SFB_SHIFT 33 +#define MCSR1_SFB (_ULCAST_(1) << MCSR1_SFB_SHIFT) +#define MCSR1_CCDMA_SHIFT 32 +#define MCSR1_CCDMA (_ULCAST_(1) << MCSR1_CCDMA_SHIFT) +#define MCSR1_LAMO_SHIFT 22 +#define MCSR1_LAMO (_ULCAST_(1) << MCSR1_LAMO_SHIFT) +#define MCSR1_LSPW_SHIFT 21 +#define MCSR1_LSPW (_ULCAST_(1) << MCSR1_LSPW_SHIFT) +#define MCSR1_MIPSBT_SHIFT 20 +#define MCSR1_MIPSBT (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT) +#define MCSR1_ARMBT_SHIFT 19 +#define MCSR1_ARMBT (_ULCAST_(1) << MCSR1_ARMBT_SHIFT) +#define MCSR1_X86BT_SHIFT 18 +#define MCSR1_X86BT (_ULCAST_(1) << MCSR1_X86BT_SHIFT) +#define MCSR1_LLFTPVERS_SHIFT 15 +#define MCSR1_LLFTPVERS_WIDTH 3 +#define MCSR1_LLFTPVERS (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT) +#define MCSR1_LLFTP_SHIFT 14 +#define MCSR1_LLFTP (_ULCAST_(1) << MCSR1_LLFTP_SHIFT) +#define MCSR1_VZVERS_SHIFT 11 +#define MCSR1_VZVERS_WIDTH 3 +#define MCSR1_VZVERS (_ULCAST_(7) << MCSR1_VZVERS_SHIFT) +#define MCSR1_VZ_SHIFT 10 +#define MCSR1_VZ (_ULCAST_(1) << MCSR1_VZ_SHIFT) +#define MCSR1_CRYPTO_SHIFT 9 +#define MCSR1_CRYPTO (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT) +#define MCSR1_COMPLEX_SHIFT 8 +#define MCSR1_COMPLEX (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT) +#define MCSR1_LASX_SHIFT 7 +#define MCSR1_LASX (_ULCAST_(1) << MCSR1_LASX_SHIFT) +#define MCSR1_LSX_SHIFT 6 +#define MCSR1_LSX (_ULCAST_(1) << MCSR1_LSX_SHIFT) +#define MCSR1_FPVERS_SHIFT 3 +#define MCSR1_FPVERS_WIDTH 3 +#define MCSR1_FPVERS (_ULCAST_(7) << MCSR1_FPVERS_SHIFT) +#define MCSR1_FPDP_SHIFT 2 +#define MCSR1_FPDP (_ULCAST_(1) << MCSR1_FPDP_SHIFT) +#define MCSR1_FPSP_SHIFT 1 +#define MCSR1_FPSP (_ULCAST_(1) << MCSR1_FPSP_SHIFT) +#define MCSR1_FP_SHIFT 0 +#define MCSR1_FP (_ULCAST_(1) << MCSR1_FP_SHIFT) + +#define LOONGARCH_CSR_MCSR2 0xc2 /* CPUCFG4 and CPUCFG5 */ +#define MCSR2_CCDIV_SHIFT 48 +#define 
MCSR2_CCDIV_WIDTH 16 +#define MCSR2_CCDIV (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT) +#define MCSR2_CCMUL_SHIFT 32 +#define MCSR2_CCMUL_WIDTH 16 +#define MCSR2_CCMUL (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT) +#define MCSR2_CCFREQ_WIDTH 32 +#define MCSR2_CCFREQ (_ULCAST_(0xffffffff)) +#define CCFREQ_DEFAULT 0x5f5e100 /* 100MHz */ + +#define LOONGARCH_CSR_MCSR3 0xc3 /* CPUCFG6 */ +#define MCSR3_UPM_SHIFT 14 +#define MCSR3_UPM (_ULCAST_(1) << MCSR3_UPM_SHIFT) +#define MCSR3_PMBITS_SHIFT 8 +#define MCSR3_PMBITS_WIDTH 6 +#define MCSR3_PMBITS (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT) +#define PMBITS_DEFAULT 0x40 +#define MCSR3_PMNUM_SHIFT 4 +#define MCSR3_PMNUM_WIDTH 4 +#define MCSR3_PMNUM (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT) +#define MCSR3_PAMVER_SHIFT 1 +#define MCSR3_PAMVER_WIDTH 3 +#define MCSR3_PAMVER (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT) +#define MCSR3_PMP_SHIFT 0 +#define MCSR3_PMP (_ULCAST_(1) << MCSR3_PMP_SHIFT) + +#define LOONGARCH_CSR_MCSR8 0xc8 /* CPUCFG16 and CPUCFG17 */ +#define MCSR8_L1I_SIZE_SHIFT 56 +#define MCSR8_L1I_SIZE_WIDTH 7 +#define MCSR8_L1I_SIZE (_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT) +#define MCSR8_L1I_IDX_SHIFT 48 +#define MCSR8_L1I_IDX_WIDTH 8 +#define MCSR8_L1I_IDX (_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT) +#define MCSR8_L1I_WAY_SHIFT 32 +#define MCSR8_L1I_WAY_WIDTH 16 +#define MCSR8_L1I_WAY (_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT) +#define MCSR8_L3DINCL_SHIFT 16 +#define MCSR8_L3DINCL (_ULCAST_(1) << MCSR8_L3DINCL_SHIFT) +#define MCSR8_L3DPRIV_SHIFT 15 +#define MCSR8_L3DPRIV (_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT) +#define MCSR8_L3DPRE_SHIFT 14 +#define MCSR8_L3DPRE (_ULCAST_(1) << MCSR8_L3DPRE_SHIFT) +#define MCSR8_L3IUINCL_SHIFT 13 +#define MCSR8_L3IUINCL (_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT) +#define MCSR8_L3IUPRIV_SHIFT 12 +#define MCSR8_L3IUPRIV (_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT) +#define MCSR8_L3IUUNIFY_SHIFT 11 +#define MCSR8_L3IUUNIFY (_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT) +#define MCSR8_L3IUPRE_SHIFT 10 +#define MCSR8_L3IUPRE (_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT) +#define MCSR8_L2DINCL_SHIFT 9 +#define MCSR8_L2DINCL (_ULCAST_(1) << MCSR8_L2DINCL_SHIFT) +#define MCSR8_L2DPRIV_SHIFT 8 +#define MCSR8_L2DPRIV (_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT) +#define MCSR8_L2DPRE_SHIFT 7 +#define MCSR8_L2DPRE (_ULCAST_(1) << MCSR8_L2DPRE_SHIFT) +#define MCSR8_L2IUINCL_SHIFT 6 +#define MCSR8_L2IUINCL (_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT) +#define MCSR8_L2IUPRIV_SHIFT 5 +#define MCSR8_L2IUPRIV (_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT) +#define MCSR8_L2IUUNIFY_SHIFT 4 +#define MCSR8_L2IUUNIFY (_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT) +#define MCSR8_L2IUPRE_SHIFT 3 +#define MCSR8_L2IUPRE (_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT) +#define MCSR8_L1DPRE_SHIFT 2 +#define MCSR8_L1DPRE (_ULCAST_(1) << MCSR8_L1DPRE_SHIFT) +#define MCSR8_L1IUUNIFY_SHIFT 1 +#define MCSR8_L1IUUNIFY (_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT) +#define MCSR8_L1IUPRE_SHIFT 0 +#define MCSR8_L1IUPRE (_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT) + +#define LOONGARCH_CSR_MCSR9 0xc9 /* CPUCFG18 and CPUCFG19 */ +#define MCSR9_L2U_SIZE_SHIFT 56 +#define MCSR9_L2U_SIZE_WIDTH 7 +#define MCSR9_L2U_SIZE (_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT) +#define MCSR9_L2U_IDX_SHIFT 48 +#define MCSR9_L2U_IDX_WIDTH 8 +#define MCSR9_L2U_IDX (_ULCAST_(0xff) << MCSR9_IDX_LOG_SHIFT) +#define MCSR9_L2U_WAY_SHIFT 32 +#define MCSR9_L2U_WAY_WIDTH 16 +#define MCSR9_L2U_WAY (_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT) +#define MCSR9_L1D_SIZE_SHIFT 24 +#define MCSR9_L1D_SIZE_WIDTH 7 +#define MCSR9_L1D_SIZE (_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT) +#define MCSR9_L1D_IDX_SHIFT 
16 +#define MCSR9_L1D_IDX_WIDTH 8 +#define MCSR9_L1D_IDX (_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT) +#define MCSR9_L1D_WAY_SHIFT 0 +#define MCSR9_L1D_WAY_WIDTH 16 +#define MCSR9_L1D_WAY (_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT) + +#define LOONGARCH_CSR_MCSR10 0xca /* CPUCFG20 */ +#define MCSR10_L3U_SIZE_SHIFT 24 +#define MCSR10_L3U_SIZE_WIDTH 7 +#define MCSR10_L3U_SIZE (_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT) +#define MCSR10_L3U_IDX_SHIFT 16 +#define MCSR10_L3U_IDX_WIDTH 8 +#define MCSR10_L3U_IDX (_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT) +#define MCSR10_L3U_WAY_SHIFT 0 +#define MCSR10_L3U_WAY_WIDTH 16 +#define MCSR10_L3U_WAY (_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT) + +#define LOONGARCH_CSR_MCSR24 0xf0 /* cpucfg48 */ +#define MCSR24_RAMCG_SHIFT 3 +#define MCSR24_RAMCG (_ULCAST_(1) << MCSR24_RAMCG_SHIFT) +#define MCSR24_VFPUCG_SHIFT 2 +#define MCSR24_VFPUCG (_ULCAST_(1) << MCSR24_VFPUCG_SHIFT) +#define MCSR24_NAPEN_SHIFT 1 +#define MCSR24_NAPEN (_ULCAST_(1) << MCSR24_NAPEN_SHIFT) +#define MCSR24_MCSRLOCK_SHIFT 0 +#define MCSR24_MCSRLOCK (_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT) + +/* Uncached accelerate windows registers */ +#define LOONGARCH_CSR_UCAWIN 0x100 +#define LOONGARCH_CSR_UCAWIN0_LO 0x102 +#define LOONGARCH_CSR_UCAWIN0_HI 0x103 +#define LOONGARCH_CSR_UCAWIN1_LO 0x104 +#define LOONGARCH_CSR_UCAWIN1_HI 0x105 +#define LOONGARCH_CSR_UCAWIN2_LO 0x106 +#define LOONGARCH_CSR_UCAWIN2_HI 0x107 +#define LOONGARCH_CSR_UCAWIN3_LO 0x108 +#define LOONGARCH_CSR_UCAWIN3_HI 0x109 + +/* Direct Map windows registers */ +#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */ +#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */ +#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */ +#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ + +/* Direct Map window 0/1 */ +#define CSR_DMW0_PLV0 _CONST64_(1 << 0) +#define CSR_DMW0_VSEG _CONST64_(0x8000) +#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) +#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0) + +#define CSR_DMW1_PLV0 _CONST64_(1 << 0) +#define CSR_DMW1_MAT _CONST64_(1 << 4) +#define CSR_DMW1_VSEG _CONST64_(0x9000) +#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) +#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) + +/* Performance Counter registers */ +#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ +#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ +#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */ +#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */ +#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */ +#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */ +#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */ +#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */ +#define CSR_PERFCTRL_PLV0 (_ULCAST_(1) << 16) +#define CSR_PERFCTRL_PLV1 (_ULCAST_(1) << 17) +#define CSR_PERFCTRL_PLV2 (_ULCAST_(1) << 18) +#define CSR_PERFCTRL_PLV3 (_ULCAST_(1) << 19) +#define CSR_PERFCTRL_IE (_ULCAST_(1) << 20) +#define CSR_PERFCTRL_EVENT 0x3ff + +/* Debug registers */ +#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */ +#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */ + +#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */ +#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */ +#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */ +#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 
0 asid */ + +#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */ +#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */ +#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */ +#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */ + +#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */ +#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */ +#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */ +#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */ + +#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */ +#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */ +#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */ +#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */ + +#define LOONGARCH_CSR_DB4ADDR 0x330 /* data breakpoint 4 address */ +#define LOONGARCH_CSR_DB4MASK 0x331 /* data breakpoint 4 mask */ +#define LOONGARCH_CSR_DB4CTL 0x332 /* data breakpoint 4 control */ +#define LOONGARCH_CSR_DB4ASID 0x333 /* data breakpoint 4 asid */ + +#define LOONGARCH_CSR_DB5ADDR 0x338 /* data breakpoint 5 address */ +#define LOONGARCH_CSR_DB5MASK 0x339 /* data breakpoint 5 mask */ +#define LOONGARCH_CSR_DB5CTL 0x33a /* data breakpoint 5 control */ +#define LOONGARCH_CSR_DB5ASID 0x33b /* data breakpoint 5 asid */ + +#define LOONGARCH_CSR_DB6ADDR 0x340 /* data breakpoint 6 address */ +#define LOONGARCH_CSR_DB6MASK 0x341 /* data breakpoint 6 mask */ +#define LOONGARCH_CSR_DB6CTL 0x342 /* data breakpoint 6 control */ +#define LOONGARCH_CSR_DB6ASID 0x343 /* data breakpoint 6 asid */ + +#define LOONGARCH_CSR_DB7ADDR 0x348 /* data breakpoint 7 address */ +#define LOONGARCH_CSR_DB7MASK 0x349 /* data breakpoint 7 mask */ +#define LOONGARCH_CSR_DB7CTL 0x34a /* data breakpoint 7 control */ +#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */ + +#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */ +#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */ + +#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */ +#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */ +#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */ +#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */ + +#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */ +#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */ +#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */ +#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */ + +#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */ +#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */ +#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */ +#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */ + +#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */ +#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */ +#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */ +#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */ + +#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */ +#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */ +#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */ +#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */ + +#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */ +#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */ +#define 
LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */ +#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */ + +#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */ +#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */ +#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */ +#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */ + +#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */ +#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */ +#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */ +#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */ + +#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */ +#define LOONGARCH_CSR_DERA 0x501 /* debug era */ +#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */ + +/* + * CSR_ECFG IM + */ +#define ECFG0_IM 0x00001fff +#define ECFGB_SIP0 0 +#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) +#define ECFGB_SIP1 1 +#define ECFGF_SIP1 (_ULCAST_(1) << ECFGB_SIP1) +#define ECFGB_IP0 2 +#define ECFGF_IP0 (_ULCAST_(1) << ECFGB_IP0) +#define ECFGB_IP1 3 +#define ECFGF_IP1 (_ULCAST_(1) << ECFGB_IP1) +#define ECFGB_IP2 4 +#define ECFGF_IP2 (_ULCAST_(1) << ECFGB_IP2) +#define ECFGB_IP3 5 +#define ECFGF_IP3 (_ULCAST_(1) << ECFGB_IP3) +#define ECFGB_IP4 6 +#define ECFGF_IP4 (_ULCAST_(1) << ECFGB_IP4) +#define ECFGB_IP5 7 +#define ECFGF_IP5 (_ULCAST_(1) << ECFGB_IP5) +#define ECFGB_IP6 8 +#define ECFGF_IP6 (_ULCAST_(1) << ECFGB_IP6) +#define ECFGB_IP7 9 +#define ECFGF_IP7 (_ULCAST_(1) << ECFGB_IP7) +#define ECFGB_PMC 10 +#define ECFGF_PMC (_ULCAST_(1) << ECFGB_PMC) +#define ECFGB_TIMER 11 +#define ECFGF_TIMER (_ULCAST_(1) << ECFGB_TIMER) +#define ECFGB_IPI 12 +#define ECFGF_IPI (_ULCAST_(1) << ECFGB_IPI) +#define ECFGF(hwirq) (_ULCAST_(1) << hwirq) + +#define ESTATF_IP 0x00001fff + +#define LOONGARCH_IOCSR_FEATURES 0x8 +#define IOCSRF_TEMP BIT_ULL(0) +#define IOCSRF_NODECNT BIT_ULL(1) +#define IOCSRF_MSI BIT_ULL(2) +#define IOCSRF_EXTIOI BIT_ULL(3) +#define IOCSRF_CSRIPI BIT_ULL(4) +#define IOCSRF_FREQCSR BIT_ULL(5) +#define IOCSRF_FREQSCALE BIT_ULL(6) +#define IOCSRF_DVFSV1 BIT_ULL(7) +#define IOCSRF_EIODECODE BIT_ULL(9) +#define IOCSRF_FLATMODE BIT_ULL(10) +#define IOCSRF_VM BIT_ULL(11) + +#define LOONGARCH_IOCSR_VENDOR 0x10 + +#define LOONGARCH_IOCSR_CPUNAME 0x20 + +#define LOONGARCH_IOCSR_NODECNT 0x408 + +#define LOONGARCH_IOCSR_MISC_FUNC 0x420 +#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) +#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) + +#define LOONGARCH_IOCSR_CPUTEMP 0x428 + +/* PerCore CSR, only accessible by local cores */ +#define LOONGARCH_IOCSR_IPI_STATUS 0x1000 +#define LOONGARCH_IOCSR_IPI_EN 0x1004 +#define LOONGARCH_IOCSR_IPI_SET 0x1008 +#define LOONGARCH_IOCSR_IPI_CLEAR 0x100c +#define LOONGARCH_IOCSR_MBUF0 0x1020 +#define LOONGARCH_IOCSR_MBUF1 0x1028 +#define LOONGARCH_IOCSR_MBUF2 0x1030 +#define LOONGARCH_IOCSR_MBUF3 0x1038 + +#define LOONGARCH_IOCSR_IPI_SEND 0x1040 +#define IOCSR_IPI_SEND_IP_SHIFT 0 +#define IOCSR_IPI_SEND_CPU_SHIFT 16 +#define IOCSR_IPI_SEND_BLOCKING BIT(31) + +#define LOONGARCH_IOCSR_MBUF_SEND 0x1048 +#define IOCSR_MBUF_SEND_BLOCKING BIT_ULL(31) +#define IOCSR_MBUF_SEND_BOX_SHIFT 2 +#define IOCSR_MBUF_SEND_BOX_LO(box) (box << 1) +#define IOCSR_MBUF_SEND_BOX_HI(box) ((box << 1) + 1) +#define IOCSR_MBUF_SEND_CPU_SHIFT 16 +#define IOCSR_MBUF_SEND_BUF_SHIFT 32 +#define IOCSR_MBUF_SEND_H32_MASK 0xFFFFFFFF00000000ULL + +#define LOONGARCH_IOCSR_ANY_SEND 0x1158 +#define IOCSR_ANY_SEND_BLOCKING BIT_ULL(31) +#define 
IOCSR_ANY_SEND_CPU_SHIFT 16 +#define IOCSR_ANY_SEND_MASK_SHIFT 27 +#define IOCSR_ANY_SEND_BUF_SHIFT 32 +#define IOCSR_ANY_SEND_H32_MASK 0xFFFFFFFF00000000ULL + +/* Register offset and bit definition for CSR access */ +#define LOONGARCH_IOCSR_TIMER_CFG 0x1060 +#define LOONGARCH_IOCSR_TIMER_TICK 0x1070 +#define IOCSR_TIMER_CFG_RESERVED (_ULCAST_(1) << 63) +#define IOCSR_TIMER_CFG_PERIODIC (_ULCAST_(1) << 62) +#define IOCSR_TIMER_CFG_EN (_ULCAST_(1) << 61) +#define IOCSR_TIMER_MASK 0x0ffffffffffffULL +#define IOCSR_TIMER_INITVAL_RST (_ULCAST_(0xffff) << 48) + +#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE 0x14a0 +#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE 0x14c0 +#define LOONGARCH_IOCSR_EXTIOI_EN_BASE 0x1600 +#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE 0x1680 +#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE 0x1800 +#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00 +#define IOCSR_EXTIOI_VECTOR_NUM 256 + +#ifndef __ASSEMBLY__ + +static inline u64 drdtime(void) +{ + int rID = 0; + u64 val = 0; + + __asm__ __volatile__( + "rdtime.d %0, %1 \n\t" + : "=r"(val), "=r"(rID) + : + ); + return val; +} + +static inline unsigned int get_csr_cpuid(void) +{ + return csr_read32(LOONGARCH_CSR_CPUID); +} + +static inline void csr_any_send(unsigned int addr, unsigned int data, + unsigned int data_mask, unsigned int cpu) +{ + uint64_t val = 0; + + val = IOCSR_ANY_SEND_BLOCKING | addr; + val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT); + val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT); + val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT); + iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND); +} + +static inline unsigned int read_csr_excode(void) +{ + return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; +} + +static inline void write_csr_index(unsigned int idx) +{ + csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX); +} + +static inline unsigned int read_csr_pagesize(void) +{ + return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE; +} + +static inline void write_csr_pagesize(unsigned int size) +{ + csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX); +} + +static inline unsigned int read_csr_tlbrefill_pagesize(void) +{ + return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT; +} + +static inline void write_csr_tlbrefill_pagesize(unsigned int size) +{ + csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI); +} + +#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID) +#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID) +#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI) +#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI) +#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0) +#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0) +#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1) +#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1) +#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG) +#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG) +#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT) +#define write_csr_estat(val) csr_write32(val, LOONGARCH_CSR_ESTAT) +#define read_csr_tlbidx() csr_read32(LOONGARCH_CSR_TLBIDX) +#define write_csr_tlbidx(val) csr_write32(val, LOONGARCH_CSR_TLBIDX) +#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN) +#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN) +#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID) +#define read_csr_prcfg1() 
csr_read64(LOONGARCH_CSR_PRCFG1) +#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1) +#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2) +#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2) +#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3) +#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3) +#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE) +#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG) +#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG) +#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR) +#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2) + +#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0) +#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0) +#define read_csr_perfctrl1() csr_read64(LOONGARCH_CSR_PERFCTRL1) +#define read_csr_perfcntr1() csr_read64(LOONGARCH_CSR_PERFCNTR1) +#define read_csr_perfctrl2() csr_read64(LOONGARCH_CSR_PERFCTRL2) +#define read_csr_perfcntr2() csr_read64(LOONGARCH_CSR_PERFCNTR2) +#define read_csr_perfctrl3() csr_read64(LOONGARCH_CSR_PERFCTRL3) +#define read_csr_perfcntr3() csr_read64(LOONGARCH_CSR_PERFCNTR3) +#define write_csr_perfctrl0(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL0) +#define write_csr_perfcntr0(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR0) +#define write_csr_perfctrl1(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL1) +#define write_csr_perfcntr1(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR1) +#define write_csr_perfctrl2(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL2) +#define write_csr_perfcntr2(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR2) +#define write_csr_perfctrl3(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL3) +#define write_csr_perfcntr3(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR3) + +/* + * Manipulate bits in a register. 
+ */ +#define __BUILD_CSR_COMMON(name) \ +static inline unsigned long \ +set_##name(unsigned long set) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res | set; \ + write_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned long \ +clear_##name(unsigned long clear) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res & ~clear; \ + write_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned long \ +change_##name(unsigned long change, unsigned long val) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res & ~change; \ + new |= (val & change); \ + write_##name(new); \ + \ + return res; \ +} + +#define __BUILD_CSR_OP(name) __BUILD_CSR_COMMON(csr_##name) + +__BUILD_CSR_OP(euen) +__BUILD_CSR_OP(ecfg) +__BUILD_CSR_OP(tlbidx) + +#define set_csr_estat(val) \ + csr_xchg32(val, val, LOONGARCH_CSR_ESTAT) +#define clear_csr_estat(val) \ + csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT) + +#endif /* __ASSEMBLY__ */ + +/* Generic EntryLo bit definitions */ +#define ENTRYLO_V (_ULCAST_(1) << 0) +#define ENTRYLO_D (_ULCAST_(1) << 1) +#define ENTRYLO_PLV_SHIFT 2 +#define ENTRYLO_PLV (_ULCAST_(3) << ENTRYLO_PLV_SHIFT) +#define ENTRYLO_C_SHIFT 4 +#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT) +#define ENTRYLO_G (_ULCAST_(1) << 6) +#define ENTRYLO_NR (_ULCAST_(1) << 61) +#define ENTRYLO_NX (_ULCAST_(1) << 62) + +/* Values for PageSize register */ +#define PS_4K 0x0000000c +#define PS_8K 0x0000000d +#define PS_16K 0x0000000e +#define PS_32K 0x0000000f +#define PS_64K 0x00000010 +#define PS_128K 0x00000011 +#define PS_256K 0x00000012 +#define PS_512K 0x00000013 +#define PS_1M 0x00000014 +#define PS_2M 0x00000015 +#define PS_4M 0x00000016 +#define PS_8M 0x00000017 +#define PS_16M 0x00000018 +#define PS_32M 0x00000019 +#define PS_64M 0x0000001a +#define PS_128M 0x0000001b +#define PS_256M 0x0000001c +#define PS_512M 0x0000001d +#define PS_1G 0x0000001e + +/* Default page size for a given kernel configuration */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PS_DEFAULT_SIZE PS_4K +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PS_DEFAULT_SIZE PS_16K +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PS_DEFAULT_SIZE PS_64K +#else +#error Bad page size configuration! +#endif + +/* Default huge tlb size for a given kernel configuration */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PS_HUGE_SIZE PS_1M +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PS_HUGE_SIZE PS_16M +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PS_HUGE_SIZE PS_256M +#else +#error Bad page size configuration for hugetlbfs! +#endif + +/* ExStatus.ExcCode */ +#define EXCCODE_RSV 0 /* Reserved */ +#define EXCCODE_TLBL 1 /* TLB miss on a load */ +#define EXCCODE_TLBS 2 /* TLB miss on a store */ +#define EXCCODE_TLBI 3 /* TLB miss on a ifetch */ +#define EXCCODE_TLBM 4 /* TLB modified fault */ +#define EXCCODE_TLBNR 5 /* TLB Read-Inhibit exception */ +#define EXCCODE_TLBNX 6 /* TLB Execution-Inhibit exception */ +#define EXCCODE_TLBPE 7 /* TLB Privilege Error */ +#define EXCCODE_ADE 8 /* Address Error */ + #define EXSUBCODE_ADEF 0 /* Fetch Instruction */ + #define EXSUBCODE_ADEM 1 /* Access Memory*/ +#define EXCCODE_ALE 9 /* Unalign Access */ +#define EXCCODE_OOB 10 /* Out of bounds */ +#define EXCCODE_SYS 11 /* System call */ +#define EXCCODE_BP 12 /* Breakpoint */ +#define EXCCODE_INE 13 /* Inst. Not Exist */ +#define EXCCODE_IPE 14 /* Inst. 
Privileged Error */ +#define EXCCODE_FPDIS 15 /* FPU Disabled */ +#define EXCCODE_LSXDIS 16 /* LSX Disabled */ +#define EXCCODE_LASXDIS 17 /* LASX Disabled */ +#define EXCCODE_FPE 18 /* Floating Point Exception */ + #define EXCSUBCODE_FPE 0 /* Floating Point Exception */ + #define EXCSUBCODE_VFPE 1 /* Vector Exception */ +#define EXCCODE_WATCH 19 /* Watch address reference */ +#define EXCCODE_BTDIS 20 /* Binary Trans. Disabled */ +#define EXCCODE_BTE 21 /* Binary Trans. Exception */ +#define EXCCODE_PSI 22 /* Guest Privileged Error */ +#define EXCCODE_HYP 23 /* Hypercall */ +#define EXCCODE_GCM 24 /* Guest CSR modified */ + #define EXCSUBCODE_GCSC 0 /* Software caused */ + #define EXCSUBCODE_GCHC 1 /* Hardware caused */ +#define EXCCODE_SE 25 /* Security */ + +#define EXCCODE_INT_START 64 +#define EXCCODE_SIP0 64 +#define EXCCODE_SIP1 65 +#define EXCCODE_IP0 66 +#define EXCCODE_IP1 67 +#define EXCCODE_IP2 68 +#define EXCCODE_IP3 69 +#define EXCCODE_IP4 70 +#define EXCCODE_IP5 71 +#define EXCCODE_IP6 72 +#define EXCCODE_IP7 73 +#define EXCCODE_PMC 74 /* Performance Counter */ +#define EXCCODE_TIMER 75 +#define EXCCODE_IPI 76 +#define EXCCODE_NMI 77 +#define EXCCODE_INT_END 78 +#define EXCCODE_INT_NUM (EXCCODE_INT_END - EXCCODE_INT_START) + +/* FPU register names */ +#define LOONGARCH_FCSR0 $r0 +#define LOONGARCH_FCSR1 $r1 +#define LOONGARCH_FCSR2 $r2 +#define LOONGARCH_FCSR3 $r3 + +/* FPU Status Register Values */ +#define FPU_CSR_RSVD 0xe0e0fce0 + +/* + * X the exception cause indicator + * E the exception enable + * S the sticky/flag bit + */ +#define FPU_CSR_ALL_X 0x1f000000 +#define FPU_CSR_INV_X 0x10000000 +#define FPU_CSR_DIV_X 0x08000000 +#define FPU_CSR_OVF_X 0x04000000 +#define FPU_CSR_UDF_X 0x02000000 +#define FPU_CSR_INE_X 0x01000000 + +#define FPU_CSR_ALL_S 0x001f0000 +#define FPU_CSR_INV_S 0x00100000 +#define FPU_CSR_DIV_S 0x00080000 +#define FPU_CSR_OVF_S 0x00040000 +#define FPU_CSR_UDF_S 0x00020000 +#define FPU_CSR_INE_S 0x00010000 + +#define FPU_CSR_ALL_E 0x0000001f +#define FPU_CSR_INV_E 0x00000010 +#define FPU_CSR_DIV_E 0x00000008 +#define FPU_CSR_OVF_E 0x00000004 +#define FPU_CSR_UDF_E 0x00000002 +#define FPU_CSR_INE_E 0x00000001 + +/* Bits 8 and 9 of FPU Status Register specify the rounding mode */ +#define FPU_CSR_RM 0x300 +#define FPU_CSR_RN 0x000 /* nearest */ +#define FPU_CSR_RZ 0x100 /* towards zero */ +#define FPU_CSR_RU 0x200 /* towards +Infinity */ +#define FPU_CSR_RD 0x300 /* towards -Infinity */ + +#define read_fcsr(source) \ +({ \ + unsigned int __res; \ +\ + __asm__ __volatile__( \ + " movfcsr2gr %0, "__stringify(source)" \n" \ + : "=r" (__res)); \ + __res; \ +}) + +#define write_fcsr(dest, val) \ +do { \ + __asm__ __volatile__( \ + " movgr2fcsr "__stringify(dest)", %0 \n" \ + : : "r" (val)); \ +} while (0) + +#endif /* _ASM_LOONGARCH_H */ diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h new file mode 100644 index 000000000..00db93eda --- /dev/null +++ b/arch/loongarch/include/asm/loongson.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGSON_H +#define __ASM_LOONGSON_H + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <asm/addrspace.h> +#include <asm/bootinfo.h> + +#define LOONGSON_REG(x) \ + (*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x))) + +#define LOONGSON_LIO_BASE 0x18000000 +#define 
LOONGSON_LIO_SIZE 0x00100000 /* 1M */ +#define LOONGSON_LIO_TOP (LOONGSON_LIO_BASE+LOONGSON_LIO_SIZE-1) + +#define LOONGSON_BOOT_BASE 0x1c000000 +#define LOONGSON_BOOT_SIZE 0x02000000 /* 32M */ +#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1) + +#define LOONGSON_REG_BASE 0x1fe00000 +#define LOONGSON_REG_SIZE 0x00100000 /* 1M */ +#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1) + +/* GPIO Regs - r/w */ + +#define LOONGSON_GPIODATA LOONGSON_REG(0x11c) +#define LOONGSON_GPIOIE LOONGSON_REG(0x120) +#define LOONGSON_REG_GPIO_BASE (LOONGSON_REG_BASE + 0x11c) + +#define MAX_PACKAGES 16 + +#define xconf_readl(addr) readl(addr) +#define xconf_readq(addr) readq(addr) + +static inline void xconf_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile ( + " st.w %[v], %[hw], 0 \n" + " ld.b $zero, %[hw], 0 \n" + : + : [hw] "r" (addr), [v] "r" (val) + ); +} + +static inline void xconf_writeq(u64 val64, volatile void __iomem *addr) +{ + asm volatile ( + " st.d %[v], %[hw], 0 \n" + " ld.b $zero, %[hw], 0 \n" + : + : [hw] "r" (addr), [v] "r" (val64) + ); +} + +/* ============== LS7A registers =============== */ +#define LS7A_PCH_REG_BASE 0x10000000UL +/* LPC regs */ +#define LS7A_LPC_REG_BASE (LS7A_PCH_REG_BASE + 0x00002000) +/* CHIPCFG regs */ +#define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000) +/* MISC reg base */ +#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000) +/* ACPI regs */ +#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000) +/* RTC regs */ +#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100) + +#define LS7A_DMA_CFG (volatile void *)TO_UNCACHE(LS7A_CHIPCFG_REG_BASE + 0x041c) +#define LS7A_DMA_NODE_SHF 8 +#define LS7A_DMA_NODE_MASK 0x1F00 + +#define LS7A_INT_MASK_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x020) +#define LS7A_INT_EDGE_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x060) +#define LS7A_INT_CLEAR_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x080) +#define LS7A_INT_HTMSI_EN_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x040) +#define LS7A_INT_ROUTE_ENTRY_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x100) +#define LS7A_INT_HTMSI_VEC_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200) +#define LS7A_INT_STATUS_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3a0) +#define LS7A_INT_POL_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3e0) +#define LS7A_LPC_INT_CTL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2000) +#define LS7A_LPC_INT_ENA (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2004) +#define LS7A_LPC_INT_STS (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2008) +#define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c) +#define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010) + +#define LS7A_PMCON_SOC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000) +#define LS7A_PMCON_RESUME_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004) +#define LS7A_PMCON_RTC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008) +#define LS7A_PM1_EVT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c) +#define LS7A_PM1_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010) +#define LS7A_PM1_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014) +#define LS7A_PM1_TMR_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018) +#define LS7A_P_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c) +#define LS7A_GPE0_STS_REG (volatile void 
*)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028) +#define LS7A_GPE0_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c) +#define LS7A_RST_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030) +#define LS7A_WD_SET_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034) +#define LS7A_WD_TIMER_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038) +#define LS7A_THSENS_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c) +#define LS7A_GEN_RTC_1_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050) +#define LS7A_GEN_RTC_2_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054) +#define LS7A_DPM_CFG_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400) +#define LS7A_DPM_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404) +#define LS7A_DPM_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408) + +typedef enum { + ACPI_PCI_HOTPLUG_STATUS = 1 << 1, + ACPI_CPU_HOTPLUG_STATUS = 1 << 2, + ACPI_MEM_HOTPLUG_STATUS = 1 << 3, + ACPI_POWERBUTTON_STATUS = 1 << 8, + ACPI_RTC_WAKE_STATUS = 1 << 10, + ACPI_PCI_WAKE_STATUS = 1 << 14, + ACPI_ANY_WAKE_STATUS = 1 << 15, +} AcpiEventStatusBits; + +#define HT1LO_OFFSET 0xe0000000000UL + +/* PCI Configuration Space Base */ +#define MCFG_EXT_PCICFG_BASE 0xefe00000000UL + +/* REG ACCESS*/ +#define ls7a_readb(addr) (*(volatile unsigned char *)TO_UNCACHE(addr)) +#define ls7a_readw(addr) (*(volatile unsigned short *)TO_UNCACHE(addr)) +#define ls7a_readl(addr) (*(volatile unsigned int *)TO_UNCACHE(addr)) +#define ls7a_readq(addr) (*(volatile unsigned long *)TO_UNCACHE(addr)) +#define ls7a_writeb(val, addr) *(volatile unsigned char *)TO_UNCACHE(addr) = (val) +#define ls7a_writew(val, addr) *(volatile unsigned short *)TO_UNCACHE(addr) = (val) +#define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val) +#define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val) + +#endif /* __ASM_LOONGSON_H */ diff --git a/arch/loongarch/include/asm/mmu.h b/arch/loongarch/include/asm/mmu.h new file mode 100644 index 000000000..0cc2d0803 --- /dev/null +++ b/arch/loongarch/include/asm/mmu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_MMU_H +#define __ASM_MMU_H + +#include <linux/atomic.h> +#include <linux/spinlock.h> + +typedef struct { + u64 asid[NR_CPUS]; + void *vdso; +} mm_context_t; + +#endif /* __ASM_MMU_H */ diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h new file mode 100644 index 000000000..9f97c3453 --- /dev/null +++ b/arch/loongarch/include/asm/mmu_context.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Switch a MMU context. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MMU_CONTEXT_H +#define _ASM_MMU_CONTEXT_H + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/mm_types.h> +#include <linux/smp.h> +#include <linux/slab.h> + +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm-generic/mm_hooks.h> + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. 
+ */ +static inline u64 asid_version_mask(unsigned int cpu) +{ + return ~(u64)(cpu_asid_mask(&cpu_data[cpu])); +} + +static inline u64 asid_first_version(unsigned int cpu) +{ + return cpu_asid_mask(&cpu_data[cpu]) + 1; +} + +#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) +#define asid_cache(cpu) (cpu_data[cpu].asid_cache) +#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu])) + +static inline int asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu)) + return 0; + + return 1; +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* Normal, classic get_new_mmu_context */ +static inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +{ + u64 asid = asid_cache(cpu); + + if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) + local_flush_tlb_user(); /* start new asid cycle */ + + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + cpu_context(i, mm) = 0; + + return 0; +} + +static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + + /* Check if our ASID is of an older version and thus invalid */ + if (!asid_valid(next, cpu)) + get_new_mmu_context(next, cpu); + + write_csr_asid(cpu_asid(cpu, next)); + + if (next != &init_mm) + csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); + else + csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + + /* + * Mark current->active_mm as not "active" anymore. + * We don't want to mislead possible IPI tlb flush routines. + */ + cpumask_set_cpu(cpu, mm_cpumask(next)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{ +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(task, mm) do { } while (0) + +/* + * If mm is currently active, we can't really drop it. + * Instead, we will get a new one for it. 
+ */ +static inline void +drop_mmu_context(struct mm_struct *mm, unsigned int cpu) +{ + int asid; + unsigned long flags; + + local_irq_save(flags); + + asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data); + + if (asid == cpu_asid(cpu, mm)) { + if (!current->mm || (current->mm == mm)) { + get_new_mmu_context(mm, cpu); + write_csr_asid(cpu_asid(cpu, mm)); + goto out; + } + } + + /* Will get a new context next time */ + cpu_context(cpu, mm) = 0; + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +out: + local_irq_restore(flags); +} + +#endif /* _ASM_MMU_CONTEXT_H */ diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h new file mode 100644 index 000000000..fe67d0b4b --- /dev/null +++ b/arch/loongarch/include/asm/mmzone.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen (chenhuacai@loongson.cn) + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MMZONE_H_ +#define _ASM_MMZONE_H_ + +#include <asm/page.h> +#include <asm/numa.h> + +extern struct pglist_data *node_data[]; + +#define NODE_DATA(nid) (node_data[(nid)]) + +extern void setup_zero_pages(void); + +#endif /* _ASM_MMZONE_H_ */ diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h new file mode 100644 index 000000000..b29b19a46 --- /dev/null +++ b/arch/loongarch/include/asm/module.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MODULE_H +#define _ASM_MODULE_H + +#include <asm/inst.h> +#include <asm-generic/module.h> + +#define RELA_STACK_DEPTH 16 + +struct mod_section { + Elf_Shdr *shdr; + int num_entries; + int max_entries; +}; + +struct mod_arch_specific { + struct mod_section got; + struct mod_section plt; + struct mod_section plt_idx; +}; + +struct got_entry { + Elf_Addr symbol_addr; +}; + +struct plt_entry { + u32 inst_lu12iw; + u32 inst_lu32id; + u32 inst_lu52id; + u32 inst_jirl; +}; + +struct plt_idx_entry { + Elf_Addr symbol_addr; +}; + +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val); +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val); + +static inline struct got_entry emit_got_entry(Elf_Addr val) +{ + return (struct got_entry) { val }; +} + +static inline struct plt_entry emit_plt_entry(unsigned long val) +{ + u32 lu12iw, lu32id, lu52id, jirl; + + lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1); + lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID)); + lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID)); + jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff)); + + return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl }; +} + +static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val) +{ + return (struct plt_idx_entry) { val }; +} + +static inline int get_plt_idx(unsigned long val, const struct mod_section *sec) +{ + int i; + struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr; + + for (i = 0; i < sec->num_entries; i++) { + if (plt_idx[i].symbol_addr == val) + return i; + } + + return -1; +} + +static inline struct plt_entry *get_plt_entry(unsigned long val, + const struct mod_section *sec_plt, + const struct mod_section *sec_plt_idx) +{ + int plt_idx = get_plt_idx(val, sec_plt_idx); + struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; + + if (plt_idx < 0) + return NULL; + + return plt + plt_idx; +} + +static inline 
struct got_entry *get_got_entry(Elf_Addr val, + const struct mod_section *sec) +{ + struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; + int i; + + for (i = 0; i < sec->num_entries; i++) + if (got[i].symbol_addr == val) + return &got[i]; + return NULL; +} + +#endif /* _ASM_MODULE_H */ diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h new file mode 100644 index 000000000..a3d1bc0fc --- /dev/null +++ b/arch/loongarch/include/asm/module.lds.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +SECTIONS { + . = ALIGN(4); + .got : { BYTE(0) } + .plt : { BYTE(0) } + .plt.idx : { BYTE(0) } +} diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h new file mode 100644 index 000000000..27f319b49 --- /dev/null +++ b/arch/loongarch/include/asm/numa.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_NUMA_H +#define _ASM_LOONGARCH_NUMA_H + +#include <linux/nodemask.h> + +#define NODE_ADDRSPACE_SHIFT 44 + +#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT) + +#ifdef CONFIG_NUMA + +extern int numa_off; +extern s16 __cpuid_to_node[CONFIG_NR_CPUS]; +extern nodemask_t numa_nodes_parsed __initdata; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); + +extern void __init early_numa_add_cpu(int cpuid, s16 node); +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); + +static inline void numa_clear_node(int cpu) +{ +} + +static inline void set_cpuid_to_node(int cpuid, s16 node) +{ + __cpuid_to_node[cpuid] = node; +} + +extern int early_cpu_to_node(int cpu); + +#else + +static inline void early_numa_add_cpu(int cpuid, s16 node) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } + +static inline int early_cpu_to_node(int cpu) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_LOONGARCH_NUMA_H */ diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h new file mode 100644 index 000000000..53f284a96 --- /dev/null +++ b/arch/loongarch/include/asm/page.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PAGE_H +#define _ASM_PAGE_H + +#include <linux/const.h> +#include <asm/addrspace.h> + +/* + * PAGE_SHIFT determines the page size + */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PAGE_SHIFT 12 +#endif +#ifdef CONFIG_PAGE_SIZE_16KB +#define PAGE_SHIFT 14 +#endif +#ifdef CONFIG_PAGE_SIZE_64KB +#define PAGE_SHIFT 16 +#endif +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#ifndef __ASSEMBLY__ + +#include <linux/kernel.h> +#include <linux/pfn.h> + +/* + * It's normally defined only for FLATMEM config 
but it's + * used in our early mem init code for all memory models. + * So always define it. + */ +#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) + +extern void clear_page(void *page); +extern void copy_page(void *to, void *from); + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +extern unsigned long shm_align_mask; + +struct page; +struct vm_area_struct; +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE + +typedef struct { unsigned long pte; } pte_t; +#define pte_val(x) ((x).pte) +#define __pte(x) ((pte_t) { (x) }) +typedef struct page *pgtable_t; + +typedef struct { unsigned long pgd; } pgd_t; +#define pgd_val(x) ((x).pgd) +#define __pgd(x) ((pgd_t) { (x) }) + +/* + * Manipulate page protection bits + */ +typedef struct { unsigned long pgprot; } pgprot_t; +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) }) +#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK) + +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +/* + * __pa()/__va() should be used only during mem init. + */ +#define __pa(x) PHYSADDR(x) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +#ifdef CONFIG_FLATMEM + +static inline int pfn_valid(unsigned long pfn) +{ + /* avoid <linux/mm.h> include hell */ + extern unsigned long max_mapnr; + unsigned long pfn_offset = ARCH_PFN_OFFSET; + + return pfn >= pfn_offset && pfn < max_mapnr; +} + +#endif + +#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr)) +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) + +extern int __virt_addr_valid(volatile void *kaddr); +#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) + +#define VM_DATA_DEFAULT_FLAGS \ + (VM_READ | VM_WRITE | \ + ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PAGE_H */ diff --git a/arch/loongarch/include/asm/pci.h b/arch/loongarch/include/asm/pci.h new file mode 100644 index 000000000..846909d7e --- /dev/null +++ b/arch/loongarch/include/asm/pci.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PCI_H +#define _ASM_PCI_H + +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/types.h> +#include <asm/io.h> + +#define PCIBIOS_MIN_IO 0x4000 +#define PCIBIOS_MIN_MEM 0x20000000 +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +#define HAVE_PCI_MMAP +#define pcibios_assign_all_busses() 0 + +extern phys_addr_t mcfg_addr_init(int node); + +/* generic pci stuff */ +#include <asm-generic/pci.h> + +#endif /* _ASM_PCI_H */ diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h new file mode 100644 index 000000000..302f0e339 --- /dev/null +++ b/arch/loongarch/include/asm/percpu.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +#include <asm/cmpxchg.h> +#include <asm/loongarch.h> + +/* + * The "address" (in fact, offset from $r21) of a per-CPU variable is close to + * the loading address of main kernel image, but far from where the modules are + * loaded. Tell the compiler this fact when using explicit relocs. + */ +#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) +#define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) +#endif + +/* Use r21 for fast access */ +register unsigned long __my_cpu_offset __asm__("$r21"); + +static inline void set_my_cpu_offset(unsigned long off) +{ + __my_cpu_offset = off; + csr_write64(off, PERCPU_BASE_KS); +} +#define __my_cpu_offset __my_cpu_offset + +#define PERCPU_OP(op, asm_op, c_op) \ +static __always_inline unsigned long __percpu_##op(void *ptr, \ + unsigned long val, int size) \ +{ \ + unsigned long ret; \ + \ + switch (size) { \ + case 4: \ + __asm__ __volatile__( \ + "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \ + : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \ + : [val] "r" (val)); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \ + : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \ + : [val] "r" (val)); \ + break; \ + default: \ + ret = 0; \ + BUILD_BUG(); \ + } \ + \ + return ret c_op val; \ +} + +PERCPU_OP(add, add, +) +PERCPU_OP(and, and, &) +PERCPU_OP(or, or, |) +#undef PERCPU_OP + +static __always_inline unsigned long __percpu_read(void *ptr, int size) +{ + unsigned long ret; + + switch (size) { + case 1: + __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 2: + __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 4: + __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 8: + __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + default: + ret = 0; + BUILD_BUG(); + } + + return ret; +} + +static __always_inline void __percpu_write(void *ptr, unsigned long val, int size) +{ + switch (size) { + case 1: + __asm__ 
__volatile__("stx.b %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 2: + __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 4: + __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 8: + __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + default: + BUILD_BUG(); + } +} + +static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, + int size) +{ + switch (size) { + case 1: + case 2: + return __xchg_small((volatile void *)ptr, val, size); + + case 4: + return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val); + + case 8: + return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val); + + default: + BUILD_BUG(); + } + + return 0; +} + +/* this_cpu_cmpxchg */ +#define _protect_cmpxchg_local(pcp, o, n) \ +({ \ + typeof(*raw_cpu_ptr(&(pcp))) __ret; \ + preempt_disable_notrace(); \ + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ + preempt_enable_notrace(); \ + __ret; \ +}) + +#define _percpu_read(pcp) \ +({ \ + typeof(pcp) __retval; \ + __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \ + __retval; \ +}) + +#define _percpu_write(pcp, val) \ +do { \ + __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \ +} while (0) \ + +#define _pcp_protect(operation, pcp, val) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable_notrace(); \ + __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ + (val), sizeof(pcp)); \ + preempt_enable_notrace(); \ + __retval; \ +}) + +#define _percpu_add(pcp, val) \ + _pcp_protect(__percpu_add, pcp, val) + +#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) + +#define _percpu_and(pcp, val) \ + _pcp_protect(__percpu_and, pcp, val) + +#define _percpu_or(pcp, val) \ + _pcp_protect(__percpu_or, pcp, val) + +#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) + +#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) +#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) + +#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) +#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) + +#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) +#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) + +#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) +#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) + +#define this_cpu_read_1(pcp) _percpu_read(pcp) +#define this_cpu_read_2(pcp) _percpu_read(pcp) +#define this_cpu_read_4(pcp) _percpu_read(pcp) +#define this_cpu_read_8(pcp) _percpu_read(pcp) + +#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) + +#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) + +#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) + +#include <asm-generic/percpu.h> 
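+
+/*
+ * Usage sketch (illustrative, not part of the upstream header): the
+ * size-dispatched helpers above are what the generic this_cpu_* API
+ * expands to here, e.g. for a hypothetical 64-bit counter:
+ *
+ *	DEFINE_PER_CPU(u64, demo_counter);
+ *
+ *	this_cpu_add(demo_counter, 1);		(this_cpu_add_8 -> amadd.d)
+ *	u64 v = this_cpu_read(demo_counter);	(this_cpu_read_8 -> ldx.d off $r21)
+ */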
+ +#endif /* __ASM_PERCPU_H */ diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h new file mode 100644 index 000000000..2a35a0bc2 --- /dev/null +++ b/arch/loongarch/include/asm/perf_event.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __LOONGARCH_PERF_EVENT_H__ +#define __LOONGARCH_PERF_EVENT_H__ + +#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs + +#endif /* __LOONGARCH_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h new file mode 100644 index 000000000..4bfeb3c9c --- /dev/null +++ b/arch/loongarch/include/asm/pgalloc.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PGALLOC_H +#define _ASM_PGALLOC_H + +#include <linux/mm.h> +#include <linux/sched.h> + +#define __HAVE_ARCH_PMD_ALLOC_ONE +#define __HAVE_ARCH_PUD_ALLOC_ONE +#include <asm-generic/pgalloc.h> + +static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} + +#ifndef __PAGETABLE_PMD_FOLDED + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud((unsigned long)pmd)); +} +#endif + +#ifndef __PAGETABLE_PUD_FOLDED + +static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + set_p4d(p4d, __p4d((unsigned long)pud)); +} + +#endif /* __PAGETABLE_PUD_FOLDED */ + +extern void pagetable_init(void); + +/* + * Initialize a new pmd table with invalid pointers. + */ +extern void pmd_init(unsigned long page, unsigned long pagetable); + +/* + * Initialize a new pgd / pmd table with invalid pointers. 
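+ *
+ * Illustrative pattern (pmd_alloc_one() below does exactly this at the
+ * PMD level): every entry of a fresh table points one level down at an
+ * invalid_*_table rather than at NULL, so page-table walkers need no
+ * NULL checks:
+ *
+ *	pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);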
+ */ +extern void pgd_init(unsigned long page); +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define __pte_free_tlb(tlb, pte, address) \ +do { \ + pgtable_pte_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +#ifndef __PAGETABLE_PMD_FOLDED + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pmd_t *pmd; + struct page *pg; + + pg = alloc_page(GFP_KERNEL_ACCOUNT); + if (!pg) + return NULL; + + if (!pgtable_pmd_page_ctor(pg)) { + __free_page(pg); + return NULL; + } + + pmd = (pmd_t *)page_address(pg); + pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); + return pmd; +} + +#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) + +#endif + +#ifndef __PAGETABLE_PUD_FOLDED + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pud_t *pud; + + pud = (pud_t *) __get_free_page(GFP_KERNEL); + if (pud) + pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); + return pud; +} + +#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x) + +#endif /* __PAGETABLE_PUD_FOLDED */ + +#endif /* _ASM_PGALLOC_H */ diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h new file mode 100644 index 000000000..5f2ebcea5 --- /dev/null +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PGTABLE_BITS_H +#define _ASM_PGTABLE_BITS_H + +/* Page table bits */ +#define _PAGE_VALID_SHIFT 0 +#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ +#define _PAGE_DIRTY_SHIFT 1 +#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */ +#define _CACHE_SHIFT 4 /* 4~5, two bits */ +#define _PAGE_GLOBAL_SHIFT 6 +#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */ +#define _PAGE_PRESENT_SHIFT 7 +#define _PAGE_WRITE_SHIFT 8 +#define _PAGE_MODIFIED_SHIFT 9 +#define _PAGE_PROTNONE_SHIFT 10 +#define _PAGE_SPECIAL_SHIFT 11 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ +#define _PAGE_PFN_SHIFT 12 +#define _PAGE_PFN_END_SHIFT 48 +#define _PAGE_PRESENT_INVALID_SHIFT 60 +#define _PAGE_NO_READ_SHIFT 61 +#define _PAGE_NO_EXEC_SHIFT 62 +#define _PAGE_RPLV_SHIFT 63 + +/* Used by software */ +#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT) +#define _PAGE_PRESENT_INVALID (_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT) +#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) +#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) +#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) + +/* Used by TLB hardware (placed in EntryLo*) */ +#define _PAGE_VALID (_ULCAST_(1) << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY (_ULCAST_(1) << _PAGE_DIRTY_SHIFT) +#define _PAGE_PLV (_ULCAST_(3) << _PAGE_PLV_SHIFT) +#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT) +#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT) +#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT) +#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT) +#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT) +#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT) +#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT) +#define _PFN_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT) + +#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT) +#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT) + +#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \ 
+ ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1)) + +/* + * Cache attributes + */ +#ifndef _CACHE_SUC +#define _CACHE_SUC (0<<_CACHE_SHIFT) /* Strong-ordered UnCached */ +#endif +#ifndef _CACHE_CC +#define _CACHE_CC (1<<_CACHE_SHIFT) /* Coherent Cached */ +#endif +#ifndef _CACHE_WUC +#define _CACHE_WUC (2<<_CACHE_SHIFT) /* Weak-ordered UnCached */ +#endif + +#define __READABLE (_PAGE_VALID) +#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE) + +#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) +#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) + +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \ + _PAGE_USER | _CACHE_CC) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ + _PAGE_USER | _CACHE_CC) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC) + +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC) +#define PAGE_KERNEL_SUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC) +#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) + +#ifndef __ASSEMBLY__ + +#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) + +#define pgprot_noncached pgprot_noncached + +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_SUC; + + return __pgprot(prot); +} + +#define pgprot_writecombine pgprot_writecombine + +static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_WUC; + + return __pgprot(prot); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h new file mode 100644 index 000000000..f991e678c --- /dev/null +++ b/arch/loongarch/include/asm/pgtable.h @@ -0,0 +1,575 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_PGTABLE_H +#define _ASM_PGTABLE_H + +#include <linux/compiler.h> +#include <asm/addrspace.h> +#include <asm/pgtable-bits.h> + +#if CONFIG_PGTABLE_LEVELS == 2 +#include <asm-generic/pgtable-nopmd.h> +#elif CONFIG_PGTABLE_LEVELS == 3 +#include <asm-generic/pgtable-nopud.h> +#else +#include <asm-generic/pgtable-nop4d.h> +#endif + +#if CONFIG_PGTABLE_LEVELS == 2 +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#elif CONFIG_PGTABLE_LEVELS == 3 +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#elif CONFIG_PGTABLE_LEVELS == 4 +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) +#endif + +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) + +#define PTRS_PER_PGD (PAGE_SIZE >> 3) +#if CONFIG_PGTABLE_LEVELS > 3 +#define PTRS_PER_PUD (PAGE_SIZE >> 3) +#endif +#if CONFIG_PGTABLE_LEVELS > 2 +#define PTRS_PER_PMD (PAGE_SIZE >> 3) +#endif +#define PTRS_PER_PTE (PAGE_SIZE >> 3) + +#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) + +#ifndef __ASSEMBLY__ + +#include <linux/mm_types.h> +#include <linux/mmzone.h> +#include <asm/fixmap.h> + +struct mm_struct; +struct vm_area_struct; + +/* + * ZERO_PAGE is a global shared page that is always zero; used + * for zero-mapped memory areas etc.. + */ + +extern unsigned long empty_zero_page; +extern unsigned long zero_page_mask; + +#define ZERO_PAGE(vaddr) \ + (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) +#define __HAVE_COLOR_ZERO_PAGE + +/* + * TLB refill handlers may also map the vmalloc area into xkvrange. + * Avoid the first couple of pages so NULL pointer dereferences will + * still reliably trap. + */ +#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) +#define MODULES_END (MODULES_VADDR + SZ_256M) + +#define VMALLOC_START MODULES_END +#define VMALLOC_END \ + (vm_map_base + \ + min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#ifndef __PAGETABLE_PMD_FOLDED +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#endif +#ifndef __PAGETABLE_PUD_FOLDED +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#endif +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern pte_t invalid_pte_table[PTRS_PER_PTE]; + +#ifndef __PAGETABLE_PUD_FOLDED + +typedef struct { unsigned long pud; } pud_t; +#define pud_val(x) ((x).pud) +#define __pud(x) ((pud_t) { (x) }) + +extern pud_t invalid_pud_table[PTRS_PER_PUD]; + +/* + * Empty pgd/p4d entries point to the invalid_pud_table. 
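+ *
+ * Worked consequence (illustrative): right after initialization, for any
+ * empty entry e at this level,
+ *
+ *	p4d_none(e)    == 1	(e holds the address of invalid_pud_table)
+ *	p4d_present(e) == 0
+ *	p4d_bad(e)     == 0	(the invalid table is page-aligned)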
+ */ +static inline int p4d_none(p4d_t p4d) +{ + return p4d_val(p4d) == (unsigned long)invalid_pud_table; +} + +static inline int p4d_bad(p4d_t p4d) +{ + return p4d_val(p4d) & ~PAGE_MASK; +} + +static inline int p4d_present(p4d_t p4d) +{ + return p4d_val(p4d) != (unsigned long)invalid_pud_table; +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = (unsigned long)invalid_pud_table; +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)p4d_val(p4d); +} + +static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) +{ + *p4d = p4dval; +} + +#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) +#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) + +#endif + +#ifndef __PAGETABLE_PMD_FOLDED + +typedef struct { unsigned long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) }) + +extern pmd_t invalid_pmd_table[PTRS_PER_PMD]; + +/* + * Empty pud entries point to the invalid_pmd_table. + */ +static inline int pud_none(pud_t pud) +{ + return pud_val(pud) == (unsigned long)invalid_pmd_table; +} + +static inline int pud_bad(pud_t pud) +{ + return pud_val(pud) & ~PAGE_MASK; +} + +static inline int pud_present(pud_t pud) +{ + return pud_val(pud) != (unsigned long)invalid_pmd_table; +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = ((unsigned long)invalid_pmd_table); +} + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pud_val(pud); +} + +#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) + +#define pud_phys(pud) PHYSADDR(pud_val(pud)) +#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) + +#endif + +/* + * Empty pmd entries point to the invalid_pte_table. + */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long)invalid_pte_table; +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~PAGE_MASK); +} + +static inline int pmd_present(pmd_t pmd) +{ + if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) + return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID)); + + return pmd_val(pmd) != (unsigned long)invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); +} + +#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) + +#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +extern pmd_t mk_pmd(struct page *page, pgprot_t prot); +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT)) +#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) + +/* + * Initialize a new pgd / pmd table with invalid pointers. + */ +extern void pgd_init(unsigned long page); +extern void pud_init(unsigned long page, unsigned long pagetable); +extern void pmd_init(unsigned long page, unsigned long pagetable); + +/* + * Non-present pages: high 40 bits are offset, next 8 bits type, + * low 16 bits zero. 
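+ *
+ * Illustrative encoding, assuming swap type 3 and page offset 0x12345:
+ *
+ *	pte_val(mk_swap_pte(3, 0x12345)) == (3UL << 16) | (0x12345UL << 24)
+ *	__swp_type(entry)   == 3
+ *	__swp_offset(entry) == 0x12345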
+ */ +static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) +{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; } + +#define __swp_type(x) (((x).val >> 16) & 0xff) +#define __swp_offset(x) ((x).val >> 24) +#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) +#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) + +extern void paging_init(void); + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) +#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + if (pte_val(pteval) & _PAGE_GLOBAL) { + pte_t *buddy = ptep_buddy(ptep); + /* + * Make sure the buddy is global too (if it's !none, + * it better already be global) + */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + "1:" __LL "%[tmp], %[buddy] \n" + " bnez %[tmp], 2f \n" + " or %[tmp], %[tmp], %[global] \n" + __SC "%[tmp], %[buddy] \n" + " beqz %[tmp], 1b \n" + " nop \n" + "2: \n" + __WEAK_LLSC_MB + : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ + if (pte_none(*buddy)) + pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ + } +} + +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + set_pte(ptep, pteval); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + /* Preserve global status for the pair */ + if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) + set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); + else + set_pte_at(mm, addr, ptep, __pte(0)); +} + +#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) +#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) +#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) + +extern pgd_t swapper_pg_dir[]; +extern pgd_t invalid_pg_dir[]; + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
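+ *
+ * Illustrative sketch: the helpers below are pure value transforms that
+ * emulate dirty/accessed tracking in software. Making a PTE both
+ * software-dirty and writable also sets the hardware-visible dirty bit:
+ *
+ *	pte = pte_mkwrite(pte_mkdirty(pte));
+ *	(now _PAGE_MODIFIED, _PAGE_WRITE and _PAGE_DIRTY are all set)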
+ */ +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_ACCESSED; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED); + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY); + return pte; +} + +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_HUGE; + return pte; +} + +#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL) +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } +static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ + +#define pte_accessible pte_accessible +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) +{ + if (pte_val(a) & _PAGE_PRESENT) + return true; + + if ((pte_val(a) & _PAGE_PROTNONE) && + atomic_read(&mm->tlb_flush_pending)) + return true; + + return false; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
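+ *
+ * Illustrative round trip (assuming some valid struct page *page):
+ *
+ *	pte_t pte = mk_pte(page, PAGE_KERNEL);
+ *	pte = pte_modify(pte, PAGE_READONLY);
+ *
+ * pte_modify() keeps everything in _PAGE_CHG_MASK (PFN, cache attribute,
+ * PLV, software dirty/special bits) and takes the rest from the new
+ * protection.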
+ */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_PAGE_CHG_MASK)); +} + +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + __update_tlb(vma, address, ptep); +} + +#define __HAVE_ARCH_UPDATE_MMU_TLB +#define update_mmu_tlb update_mmu_cache + +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + __update_tlb(vma, address, (pte_t *)pmdp); +} + +#define kern_addr_valid(addr) (1) + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ +#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) | + ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)); + pmd_val(pmd) |= _PAGE_HUGE; + + return pmd; +} + +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_WRITE); +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_WRITE; + if (pmd_val(pmd) & _PAGE_MODIFIED) + pmd_val(pmd) |= _PAGE_DIRTY; + return pmd; +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY); + return pmd; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_MODIFIED); +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED); + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_MODIFIED; + if (pmd_val(pmd) & _PAGE_WRITE) + pmd_val(pmd) |= _PAGE_DIRTY; + return pmd; +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_ACCESSED; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_ACCESSED; + return pmd; +} + +static inline struct page *pmd_page(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pfn_to_page(pmd_pfn(pmd)); + + return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_HPAGE_CHG_MASK); + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_PRESENT_INVALID; + pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE); + + return pmd; +} + +/* + * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a + * different prototype. 
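+ *
+ * Sketch of the resulting contract (illustrative):
+ *
+ *	pmd_t old = pmdp_huge_get_and_clear(mm, addr, pmdp);
+ *	(pmd_none(*pmdp) is now true; old still carries the huge mapping)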
+ */ +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old = *pmdp; + + pmd_clear(pmdp); + + return old; +} + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifdef CONFIG_NUMA_BALANCING +static inline long pte_protnone(pte_t pte) +{ + return (pte_val(pte) & _PAGE_PROTNONE); +} + +static inline long pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_PROTNONE); +} +#endif /* CONFIG_NUMA_BALANCING */ + +#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0) +#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0) + +/* + * We provide our own get_unmapped area to cope with the virtual aliasing + * constraints placed on us by the cache architecture. + */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_H */ diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h new file mode 100644 index 000000000..1672262a5 --- /dev/null +++ b/arch/loongarch/include/asm/prefetch.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_PREFETCH_H +#define __ASM_PREFETCH_H + +#define Pref_Load 0 +#define Pref_Store 8 + +#ifdef __ASSEMBLY__ + + .macro __pref hint addr +#ifdef CONFIG_CPU_HAS_PREFETCH + preld \hint, \addr, 0 +#endif + .endm + + .macro pref_load addr + __pref Pref_Load, \addr + .endm + + .macro pref_store addr + __pref Pref_Store, \addr + .endm + +#endif + +#endif /* __ASM_PREFETCH_H */ diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h new file mode 100644 index 000000000..7184f1dc6 --- /dev/null +++ b/arch/loongarch/include/asm/processor.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PROCESSOR_H +#define _ASM_PROCESSOR_H + +#include <linux/atomic.h> +#include <linux/cpumask.h> +#include <linux/sizes.h> + +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <asm/loongarch.h> +#include <asm/vdso/processor.h> +#include <uapi/asm/ptrace.h> +#include <uapi/asm/sigcontext.h> + +#ifdef CONFIG_32BIT + +#define TASK_SIZE 0x80000000UL +#define TASK_SIZE_MIN TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE + +#define TASK_IS_32BIT_ADDR 1 + +#endif + +#ifdef CONFIG_64BIT + +#define TASK_SIZE32 0x100000000UL +#define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits)) + +#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) +#define TASK_SIZE_MIN TASK_SIZE32 +#define STACK_TOP_MAX TASK_SIZE64 + +#define TASK_SIZE_OF(tsk) \ + (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) + +#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR) + +#endif + +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M) + +unsigned long stack_top(void); +#define STACK_TOP stack_top() + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
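+ *
+ * Worked example (illustrative, assuming a 64-bit task with 48 effective
+ * VA bits, so TASK_SIZE == 1UL << 48):
+ *
+ *	TASK_UNMAPPED_BASE == PAGE_ALIGN((1UL << 48) / 3)
+ *	                   == PAGE_ALIGN(0x555555555555)
+ *
+ * i.e. the legacy bottom-up mmap() search starts about one third of the
+ * way into the user address space.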
+ */ +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) + +#define FPU_REG_WIDTH 256 +#define FPU_ALIGN __attribute__((aligned(32))) + +union fpureg { + __u32 val32[FPU_REG_WIDTH / 32]; + __u64 val64[FPU_REG_WIDTH / 64]; +}; + +#define FPR_IDX(width, idx) (idx) + +#define BUILD_FPR_ACCESS(width) \ +static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \ +{ \ + return fpr->val##width[FPR_IDX(width, idx)]; \ +} \ + \ +static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \ + u##width val) \ +{ \ + fpr->val##width[FPR_IDX(width, idx)] = val; \ +} + +BUILD_FPR_ACCESS(32) +BUILD_FPR_ACCESS(64) + +struct loongarch_fpu { + unsigned int fcsr; + uint64_t fcc; /* 8x8 */ + union fpureg fpr[NUM_FPU_REGS]; +}; + +#define INIT_CPUMASK { \ + {0,} \ +} + +#define ARCH_MIN_TASKALIGN 32 + +struct loongarch_vdso_info; + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + /* Main processor registers. */ + unsigned long reg01, reg03, reg22; /* ra sp fp */ + unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */ + unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */ + + /* __schedule() return address / call frame address */ + unsigned long sched_ra; + unsigned long sched_cfa; + + /* CSR registers */ + unsigned long csr_prmd; + unsigned long csr_crmd; + unsigned long csr_euen; + unsigned long csr_ecfg; + unsigned long csr_badvaddr; /* Last user fault */ + + /* Scratch registers */ + unsigned long scr0; + unsigned long scr1; + unsigned long scr2; + unsigned long scr3; + + /* Eflags register */ + unsigned long eflags; + + /* Other stuff associated with the thread. */ + unsigned long trap_nr; + unsigned long error_code; + struct loongarch_vdso_info *vdso; + + /* + * FPU & vector registers, must be at last because + * they are conditionally copied at fork(). + */ + struct loongarch_fpu fpu FPU_ALIGN; +}; + +#define thread_saved_ra(tsk) (tsk->thread.sched_ra) +#define thread_saved_fp(tsk) (tsk->thread.sched_cfa) + +#define INIT_THREAD { \ + /* \ + * Main processor registers \ + */ \ + .reg01 = 0, \ + .reg03 = 0, \ + .reg22 = 0, \ + .reg23 = 0, \ + .reg24 = 0, \ + .reg25 = 0, \ + .reg26 = 0, \ + .reg27 = 0, \ + .reg28 = 0, \ + .reg29 = 0, \ + .reg30 = 0, \ + .reg31 = 0, \ + .sched_ra = 0, \ + .sched_cfa = 0, \ + .csr_crmd = 0, \ + .csr_prmd = 0, \ + .csr_euen = 0, \ + .csr_ecfg = 0, \ + .csr_badvaddr = 0, \ + /* \ + * Other stuff associated with the process \ + */ \ + .trap_nr = 0, \ + .error_code = 0, \ + /* \ + * FPU & vector registers \ + */ \ + .fpu = { \ + .fcsr = 0, \ + .fcc = 0, \ + .fpr = {{{0,},},}, \ + }, \ +} + +struct task_struct; + +enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL}; + +extern unsigned long boot_option_idle_override; +/* + * Do necessary setup to start up a newly executed thread. 
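+ *
+ * Illustrative call site: the ELF loader invokes this once the new image
+ * is mapped, roughly as
+ *
+ *	start_thread(regs, elf_entry, bprm->p);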
+ */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +static inline void flush_thread(void) +{ +} + +unsigned long __get_wchan(struct task_struct *p); + +#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \ + THREAD_SIZE - sizeof(struct pt_regs)) +#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk)) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3]) +#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen) +#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg) + +#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);}) + +#ifdef CONFIG_CPU_HAS_PREFETCH + +#define ARCH_HAS_PREFETCH +#define prefetch(x) __builtin_prefetch((x), 0, 1) + +#define ARCH_HAS_PREFETCHW +#define prefetchw(x) __builtin_prefetch((x), 1, 1) + +#endif + +#endif /* _ASM_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h new file mode 100644 index 000000000..59c4608de --- /dev/null +++ b/arch/loongarch/include/asm/ptrace.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PTRACE_H +#define _ASM_PTRACE_H + +#include <asm/page.h> +#include <asm/thread_info.h> +#include <uapi/asm/ptrace.h> + +/* + * This struct defines the way the registers are stored on the stack during + * a system call/exception. If you add a register here, please also add it to + * regoffset_table[] in arch/loongarch/kernel/ptrace.c. + */ +struct pt_regs { + /* Main processor registers. */ + unsigned long regs[32]; + + /* Original syscall arg0. */ + unsigned long orig_a0; + + /* Special CSR registers. */ + unsigned long csr_era; + unsigned long csr_badvaddr; + unsigned long csr_crmd; + unsigned long csr_prmd; + unsigned long csr_euen; + unsigned long csr_ecfg; + unsigned long csr_estat; + unsigned long __last[]; +} __aligned(8); + +static inline int regs_irqs_disabled(struct pt_regs *regs) +{ + return arch_irqs_disabled_flags(regs->csr_prmd); +} + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[3]; +} + +/* + * Don't use asm-generic/ptrace.h it defines FP accessors that don't make + * sense on LoongArch. We rather want an error if they get invoked. + */ + +static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) +{ + regs->csr_era = val; +} + +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register. The @offset is the + * offset of the register in struct pt_regs address which specified by @regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 
+ * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +struct task_struct; + +/* + * Does the process account for user or for system time? + */ +#define user_mode(regs) (((regs)->csr_prmd & PLV_MASK) == PLV_USER) + +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->regs[4]; +} + +#define instruction_pointer(regs) ((regs)->csr_era) +#define profile_pc(regs) instruction_pointer(regs) + +extern void die(const char *, struct pt_regs *) __noreturn; + +static inline void die_if_kernel(const char *str, struct pt_regs *regs) +{ + if (unlikely(!user_mode(regs))) + die(str, regs); +} + +#define current_pt_regs() \ +({ \ + unsigned long sp = (unsigned long)__builtin_frame_address(0); \ + (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1) - 1; \ +}) + +/* Helpers for working with the user stack pointer */ + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[3]; +} + +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->regs[3] = val; +} + +#endif /* _ASM_PTRACE_H */ diff --git a/arch/loongarch/include/asm/regdef.h b/arch/loongarch/include/asm/regdef.h new file mode 100644 index 000000000..49a374c26 --- /dev/null +++ b/arch/loongarch/include/asm/regdef.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_REGDEF_H +#define _ASM_REGDEF_H + +#define zero $r0 /* wired zero */ +#define ra $r1 /* return address */ +#define tp $r2 +#define sp $r3 /* stack pointer */ +#define a0 $r4 /* argument registers, a0/a1 reused as v0/v1 for return value */ +#define a1 $r5 +#define a2 $r6 +#define a3 $r7 +#define a4 $r8 +#define a5 $r9 +#define a6 $r10 +#define a7 $r11 +#define t0 $r12 /* caller saved */ +#define t1 $r13 +#define t2 $r14 +#define t3 $r15 +#define t4 $r16 +#define t5 $r17 +#define t6 $r18 +#define t7 $r19 +#define t8 $r20 +#define u0 $r21 +#define fp $r22 /* frame pointer */ +#define s0 $r23 /* callee saved */ +#define s1 $r24 +#define s2 $r25 +#define s3 $r26 +#define s4 $r27 +#define s5 $r28 +#define s6 $r29 +#define s7 $r30 +#define s8 $r31 + +#endif /* _ASM_REGDEF_H */ diff --git a/arch/loongarch/include/asm/seccomp.h b/arch/loongarch/include/asm/seccomp.h new file mode 100644 index 000000000..31d6ab42e --- /dev/null +++ b/arch/loongarch/include/asm/seccomp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm/unistd.h> + +#include <asm-generic/seccomp.h> + +#ifdef CONFIG_32BIT +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH32 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME 
"loongarch32" +#else +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH64 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "loongarch64" +#endif + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/loongarch/include/asm/serial.h b/arch/loongarch/include/asm/serial.h new file mode 100644 index 000000000..3fb550eb9 --- /dev/null +++ b/arch/loongarch/include/asm/serial.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM__SERIAL_H +#define __ASM__SERIAL_H + +#define BASE_BAUD 0 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) + +#endif /* __ASM__SERIAL_H */ diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h new file mode 100644 index 000000000..ca373f8e3 --- /dev/null +++ b/arch/loongarch/include/asm/setup.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _LOONGARCH_SETUP_H +#define _LOONGARCH_SETUP_H + +#include <linux/types.h> +#include <uapi/asm/setup.h> + +#define VECSIZE 0x200 + +extern unsigned long eentry; +extern unsigned long tlbrentry; +extern void tlb_init(int cpu); +extern void cpu_cache_init(void); +extern void cache_error_setup(void); +extern void per_cpu_trap_init(int cpu); +extern void set_handler(unsigned long offset, void *addr, unsigned long len); +extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len); + +#endif /* __SETUP_H */ diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h new file mode 100644 index 000000000..c9554f48d --- /dev/null +++ b/arch/loongarch/include/asm/shmparam.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SHMPARAM_H +#define _ASM_SHMPARAM_H + +#define __ARCH_FORCE_SHMLBA 1 + +#define SHMLBA SZ_64K /* attach addr a multiple of this */ + +#endif /* _ASM_SHMPARAM_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h new file mode 100644 index 000000000..d82687390 --- /dev/null +++ b/arch/loongarch/include/asm/smp.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/linkage.h> +#include <linux/threads.h> +#include <linux/cpumask.h> + +extern int smp_num_siblings; +extern int num_processors; +extern int disabled_cpus; +extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_core_map[]; +extern cpumask_t cpu_foreign_map[]; + +void loongson_smp_setup(void); +void loongson_prepare_cpus(unsigned int max_cpus); +void loongson_boot_secondary(int cpu, struct task_struct *idle); +void loongson_init_secondary(void); +void loongson_smp_finish(void); +void loongson_send_ipi_single(int cpu, unsigned int action); +void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); +#ifdef CONFIG_HOTPLUG_CPU +int loongson_cpu_disable(void); +void loongson_cpu_die(unsigned int cpu); +#endif + +static inline void plat_smp_setup(void) +{ + loongson_smp_setup(); +} + +static inline int raw_smp_processor_id(void) +{ +#if defined(__VDSO__) + extern int vdso_smp_processor_id(void) + __compiletime_error("VDSO should not call smp_processor_id()"); + return 
vdso_smp_processor_id(); +#else + return current_thread_info()->cpu; +#endif +} +#define raw_smp_processor_id raw_smp_processor_id + +/* Map from cpu id to sequential logical cpu number. This will only + * not be idempotent when cpus failed to come on-line. */ +extern int __cpu_number_map[NR_CPUS]; +#define cpu_number_map(cpu) __cpu_number_map[cpu] + +/* The reverse map from sequential logical cpu number to cpu id. */ +extern int __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +#define cpu_physical_id(cpu) cpu_logical_map(cpu) + +#define SMP_BOOT_CPU 0x1 +#define SMP_RESCHEDULE 0x2 +#define SMP_CALL_FUNCTION 0x4 + +struct secondary_data { + unsigned long stack; + unsigned long thread_info; +}; +extern struct secondary_data cpuboot_data; + +extern asmlinkage void smpboot_entry(void); + +extern void calculate_cpu_foreign_map(void); + +/* + * Generate IPI list text + */ +extern void show_ipi_list(struct seq_file *p, int prec); + +static inline void arch_send_call_function_single_ipi(int cpu) +{ + loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION); +} + +static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION); +} + +#ifdef CONFIG_HOTPLUG_CPU +static inline int __cpu_disable(void) +{ + return loongson_cpu_disable(); +} + +static inline void __cpu_die(unsigned int cpu) +{ + loongson_cpu_die(cpu); +} + +extern void play_dead(void); +#endif + +#endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h new file mode 100644 index 000000000..3d18cdf1b --- /dev/null +++ b/arch/loongarch/include/asm/sparsemem.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LOONGARCH_SPARSEMEM_H +#define _LOONGARCH_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ +#define MAX_PHYSMEM_BITS 48 + +#endif /* CONFIG_SPARSEMEM */ + +#ifdef CONFIG_MEMORY_HOTPLUG +int memory_add_physaddr_to_nid(u64 addr); +#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid +#endif + +#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS) + +#endif /* _LOONGARCH_SPARSEMEM_H */ diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h new file mode 100644 index 000000000..7cb347699 --- /dev/null +++ b/arch/loongarch/include/asm/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_H +#define _ASM_SPINLOCK_H + +#include <asm/processor.h> +#include <asm/qspinlock.h> +#include <asm/qrwlock.h> + +#endif /* _ASM_SPINLOCK_H */ diff --git a/arch/loongarch/include/asm/spinlock_types.h b/arch/loongarch/include/asm/spinlock_types.h new file mode 100644 index 000000000..7458d036c --- /dev/null +++ b/arch/loongarch/include/asm/spinlock_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> + +#endif diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h new file mode 100644 index 000000000..4ca953062 --- /dev/null +++ 
b/arch/loongarch/include/asm/stackframe.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STACKFRAME_H +#define _ASM_STACKFRAME_H + +#include <linux/threads.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/asm-offsets.h> +#include <asm/loongarch.h> +#include <asm/thread_info.h> + +/* Make the addition of cfi info a little easier. */ + .macro cfi_rel_offset reg offset=0 docfi=0 + .if \docfi + .cfi_rel_offset \reg, \offset + .endif + .endm + + .macro cfi_st reg offset=0 docfi=0 + cfi_rel_offset \reg, \offset, \docfi + LONG_S \reg, sp, \offset + .endm + + .macro cfi_restore reg offset=0 docfi=0 + .if \docfi + .cfi_restore \reg + .endif + .endm + + .macro cfi_ld reg offset=0 docfi=0 + LONG_L \reg, sp, \offset + cfi_restore \reg \offset \docfi + .endm + + .macro BACKUP_T0T1 + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + .endm + + .macro RELOAD_T0T1 + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + .endm + + .macro SAVE_TEMP docfi=0 + RELOAD_T0T1 + cfi_st t0, PT_R12, \docfi + cfi_st t1, PT_R13, \docfi + cfi_st t2, PT_R14, \docfi + cfi_st t3, PT_R15, \docfi + cfi_st t4, PT_R16, \docfi + cfi_st t5, PT_R17, \docfi + cfi_st t6, PT_R18, \docfi + cfi_st t7, PT_R19, \docfi + cfi_st t8, PT_R20, \docfi + .endm + + .macro SAVE_STATIC docfi=0 + cfi_st s0, PT_R23, \docfi + cfi_st s1, PT_R24, \docfi + cfi_st s2, PT_R25, \docfi + cfi_st s3, PT_R26, \docfi + cfi_st s4, PT_R27, \docfi + cfi_st s5, PT_R28, \docfi + cfi_st s6, PT_R29, \docfi + cfi_st s7, PT_R30, \docfi + cfi_st s8, PT_R31, \docfi + .endm + +/* + * get_saved_sp returns the SP for the current CPU by looking in the + * kernelsp array for it. It stores the current sp in t0 and loads the + * new value in sp. + */ + .macro get_saved_sp docfi=0 + la.abs t1, kernelsp +#ifdef CONFIG_SMP + csrrd t0, PERCPU_BASE_KS + LONG_ADD t1, t1, t0 +#endif + move t0, sp + .if \docfi + .cfi_register sp, t0 + .endif + LONG_L sp, t1, 0 + .endm + + .macro set_saved_sp stackp temp temp2 + la.abs \temp, kernelsp +#ifdef CONFIG_SMP + LONG_ADD \temp, \temp, u0 +#endif + LONG_S \stackp, \temp, 0 + .endm + + .macro SAVE_SOME docfi=0 + csrrd t1, LOONGARCH_CSR_PRMD + andi t1, t1, 0x3 /* extract pplv bit */ + move t0, sp + beqz t1, 8f + /* Called from user mode, new stack. 
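+	 * (Reached only when the PPLV bits extracted from CSR.PRMD are
+	 * non-zero, i.e. the exception came from user mode; get_saved_sp
+	 * then switches sp to this CPU's kernel stack via the kernelsp
+	 * array.)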
*/ + get_saved_sp docfi=\docfi +8: + PTR_ADDI sp, sp, -PT_SIZE + .if \docfi + .cfi_def_cfa sp, 0 + .endif + cfi_st t0, PT_R3, \docfi + cfi_rel_offset sp, PT_R3, \docfi + LONG_S zero, sp, PT_R0 + csrrd t0, LOONGARCH_CSR_PRMD + LONG_S t0, sp, PT_PRMD + csrrd t0, LOONGARCH_CSR_CRMD + LONG_S t0, sp, PT_CRMD + csrrd t0, LOONGARCH_CSR_EUEN + LONG_S t0, sp, PT_EUEN + csrrd t0, LOONGARCH_CSR_ECFG + LONG_S t0, sp, PT_ECFG + csrrd t0, LOONGARCH_CSR_ESTAT + PTR_S t0, sp, PT_ESTAT + cfi_st ra, PT_R1, \docfi + cfi_st a0, PT_R4, \docfi + cfi_st a1, PT_R5, \docfi + cfi_st a2, PT_R6, \docfi + cfi_st a3, PT_R7, \docfi + cfi_st a4, PT_R8, \docfi + cfi_st a5, PT_R9, \docfi + cfi_st a6, PT_R10, \docfi + cfi_st a7, PT_R11, \docfi + csrrd ra, LOONGARCH_CSR_ERA + LONG_S ra, sp, PT_ERA + .if \docfi + .cfi_rel_offset ra, PT_ERA + .endif + cfi_st tp, PT_R2, \docfi + cfi_st fp, PT_R22, \docfi + + /* Set thread_info if we're coming from user mode */ + csrrd t0, LOONGARCH_CSR_PRMD + andi t0, t0, 0x3 /* extract pplv bit */ + beqz t0, 9f + + li.d tp, ~_THREAD_MASK + and tp, tp, sp + cfi_st u0, PT_R21, \docfi + csrrd u0, PERCPU_BASE_KS +9: + .endm + + .macro SAVE_ALL docfi=0 + SAVE_SOME \docfi + SAVE_TEMP \docfi + SAVE_STATIC \docfi + .endm + + .macro RESTORE_TEMP docfi=0 + cfi_ld t0, PT_R12, \docfi + cfi_ld t1, PT_R13, \docfi + cfi_ld t2, PT_R14, \docfi + cfi_ld t3, PT_R15, \docfi + cfi_ld t4, PT_R16, \docfi + cfi_ld t5, PT_R17, \docfi + cfi_ld t6, PT_R18, \docfi + cfi_ld t7, PT_R19, \docfi + cfi_ld t8, PT_R20, \docfi + .endm + + .macro RESTORE_STATIC docfi=0 + cfi_ld s0, PT_R23, \docfi + cfi_ld s1, PT_R24, \docfi + cfi_ld s2, PT_R25, \docfi + cfi_ld s3, PT_R26, \docfi + cfi_ld s4, PT_R27, \docfi + cfi_ld s5, PT_R28, \docfi + cfi_ld s6, PT_R29, \docfi + cfi_ld s7, PT_R30, \docfi + cfi_ld s8, PT_R31, \docfi + .endm + + .macro RESTORE_SOME docfi=0 + LONG_L a0, sp, PT_PRMD + andi a0, a0, 0x3 /* extract pplv bit */ + beqz a0, 8f + cfi_ld u0, PT_R21, \docfi +8: + LONG_L a0, sp, PT_ERA + csrwr a0, LOONGARCH_CSR_ERA + LONG_L a0, sp, PT_PRMD + csrwr a0, LOONGARCH_CSR_PRMD + cfi_ld ra, PT_R1, \docfi + cfi_ld a0, PT_R4, \docfi + cfi_ld a1, PT_R5, \docfi + cfi_ld a2, PT_R6, \docfi + cfi_ld a3, PT_R7, \docfi + cfi_ld a4, PT_R8, \docfi + cfi_ld a5, PT_R9, \docfi + cfi_ld a6, PT_R10, \docfi + cfi_ld a7, PT_R11, \docfi + cfi_ld tp, PT_R2, \docfi + cfi_ld fp, PT_R22, \docfi + .endm + + .macro RESTORE_SP_AND_RET docfi=0 + cfi_ld sp, PT_R3, \docfi + ertn + .endm + + .macro RESTORE_ALL_AND_RET docfi=0 + RESTORE_STATIC \docfi + RESTORE_TEMP \docfi + RESTORE_SOME \docfi + RESTORE_SP_AND_RET \docfi + .endm + +#endif /* _ASM_STACKFRAME_H */ diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h new file mode 100644 index 000000000..f23adb15f --- /dev/null +++ b/arch/loongarch/include/asm/stacktrace.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STACKTRACE_H +#define _ASM_STACKTRACE_H + +#include <asm/asm.h> +#include <asm/ptrace.h> +#include <asm/loongarch.h> +#include <linux/stringify.h> + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_IRQ, + STACK_TYPE_TASK, +}; + +struct stack_info { + enum stack_type type; + unsigned long begin, end, next_sp; +}; + +struct stack_frame { + unsigned long fp; + unsigned long ra; +}; + +bool in_irq_stack(unsigned long stack, struct stack_info *info); +bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info); +int 
get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info); + +#define STR_LONG_L __stringify(LONG_L) +#define STR_LONG_S __stringify(LONG_S) +#define STR_LONGSIZE __stringify(LONGSIZE) + +#define STORE_ONE_REG(r) \ + STR_LONG_S " $r" __stringify(r)", %1, "STR_LONGSIZE"*"__stringify(r)"\n\t" + +#define CSRRD_ONE_REG(reg) \ + __stringify(csrrd) " %0, "__stringify(reg)"\n\t" + +static __always_inline void prepare_frametrace(struct pt_regs *regs) +{ + __asm__ __volatile__( + /* Save $ra */ + STORE_ONE_REG(1) + /* Use $ra to save PC */ + "pcaddi $ra, 0\n\t" + STR_LONG_S " $ra, %0\n\t" + /* Restore $ra */ + STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t" + STORE_ONE_REG(2) + STORE_ONE_REG(3) + STORE_ONE_REG(4) + STORE_ONE_REG(5) + STORE_ONE_REG(6) + STORE_ONE_REG(7) + STORE_ONE_REG(8) + STORE_ONE_REG(9) + STORE_ONE_REG(10) + STORE_ONE_REG(11) + STORE_ONE_REG(12) + STORE_ONE_REG(13) + STORE_ONE_REG(14) + STORE_ONE_REG(15) + STORE_ONE_REG(16) + STORE_ONE_REG(17) + STORE_ONE_REG(18) + STORE_ONE_REG(19) + STORE_ONE_REG(20) + STORE_ONE_REG(21) + STORE_ONE_REG(22) + STORE_ONE_REG(23) + STORE_ONE_REG(24) + STORE_ONE_REG(25) + STORE_ONE_REG(26) + STORE_ONE_REG(27) + STORE_ONE_REG(28) + STORE_ONE_REG(29) + STORE_ONE_REG(30) + STORE_ONE_REG(31) + : "=m" (regs->csr_era) + : "r" (regs->regs) + : "memory"); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_BADV) : "=r" (regs->csr_badvaddr)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_CRMD) : "=r" (regs->csr_crmd)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_PRMD) : "=r" (regs->csr_prmd)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_EUEN) : "=r" (regs->csr_euen)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ECFG) : "=r" (regs->csr_ecfg)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ESTAT) : "=r" (regs->csr_estat)); +} + +#endif /* _ASM_STACKTRACE_H */ diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h new file mode 100644 index 000000000..b07e60ded --- /dev/null +++ b/arch/loongarch/include/asm/string.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STRING_H +#define _ASM_STRING_H + +extern void *memset(void *__s, int __c, size_t __count); +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#endif /* _ASM_STRING_H */ diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h new file mode 100644 index 000000000..43a5ab162 --- /dev/null +++ b/arch/loongarch/include/asm/switch_to.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +#include <asm/cpu-features.h> +#include <asm/fpu.h> + +struct task_struct; + +/** + * __switch_to - switch execution of a task + * @prev: The task previously executed. + * @next: The task to begin executing. + * @next_ti: task_thread_info(next). + * @sched_ra: __schedule return address. + * @sched_cfa: __schedule call frame address. + * + * This function is used whilst scheduling to save the context of prev & load + * the context of next. Returns prev. 
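 * + * An inferred note (the consumer is outside this hunk): @sched_ra and + * @sched_cfa are recorded so a prologue-based unwinder can reconstruct + * the __schedule() frame of a task that is currently switched out.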
+ */ +extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next, struct thread_info *next_ti, + void *sched_ra, void *sched_cfa); + +/* + * For newly created kernel threads switch_to() will return to + * ret_from_kernel_thread, newly created user threads to ret_from_fork. + * That is, everything following __switch_to() will be skipped for new threads. + * So everything that matters to new threads should be placed before __switch_to(). + */ +#define switch_to(prev, next, last) \ +do { \ + lose_fpu_inatomic(1, prev); \ + (last) = __switch_to(prev, next, task_thread_info(next), \ + __builtin_return_address(0), __builtin_frame_address(0)); \ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h new file mode 100644 index 000000000..e286dc584 --- /dev/null +++ b/arch/loongarch/include/asm/syscall.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_SYSCALL_H +#define __ASM_LOONGARCH_SYSCALL_H + +#include <linux/compiler.h> +#include <uapi/linux/audit.h> +#include <linux/elf-em.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/unistd.h> + +extern void *sys_call_table[]; + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[11]; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[4] = regs->orig_a0; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->regs[4]; + + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[4]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->regs[4] = (long) error ? 
error : val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_a0; + memcpy(&args[1], &regs->regs[5], 5 * sizeof(long)); +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_LOONGARCH64; +} + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + +#endif /* __ASM_LOONGARCH_SYSCALL_H */ diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h new file mode 100644 index 000000000..b7dd9f19a --- /dev/null +++ b/arch/loongarch/include/asm/thread_info.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * thread_info.h: LoongArch low-level thread information + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include <asm/processor.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + struct pt_regs *regs; + unsigned long syscall; /* syscall number */ + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + +/* How to get the thread information struct from C. 
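 * While in the kernel, $tp always points at the current thread_info, so + * the lookup is a single register read, e.g. (illustrative): + * + * int cpu = current_thread_info()->cpu;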
*/ +register struct thread_info *__current_thread_info __asm__("$tp"); + +static inline struct thread_info *current_thread_info(void) +{ + return __current_thread_info; +} + +register unsigned long current_stack_pointer __asm__("$sp"); + +#endif /* !__ASSEMBLY__ */ + +/* thread information allocation */ +#define THREAD_SIZE SZ_16K +#define THREAD_MASK (THREAD_SIZE - 1UL) +#define THREAD_SIZE_ORDER ilog2(THREAD_SIZE / PAGE_SIZE) +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */ +#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ +#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ +#define TIF_NOHZ 6 /* in adaptive nohz mode */ +#define TIF_UPROBE 7 /* breakpointed or singlestepping */ +#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */ +#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */ +#define TIF_MEMDIE 10 /* is terminating due to OOM killer */ +#define TIF_FIXADE 11 /* Fix address errors in software */ +#define TIF_LOGADE 12 /* Log address errors to syslog */ +#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */ +#define TIF_32BIT_ADDR 14 /* 32-bit address space */ +#define TIF_LOAD_WATCH 15 /* If set, load watch registers */ +#define TIF_SINGLESTEP 16 /* Single Step */ +#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */ +#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */ + +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) +#define _TIF_NOHZ (1<<TIF_NOHZ) +#define _TIF_UPROBE (1<<TIF_UPROBE) +#define _TIF_USEDFPU (1<<TIF_USEDFPU) +#define _TIF_USEDSIMD (1<<TIF_USEDSIMD) +#define _TIF_FIXADE (1<<TIF_FIXADE) +#define _TIF_LOGADE (1<<TIF_LOGADE) +#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) +#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) +#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) +#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE) +#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE) + +#endif /* __KERNEL__ */ +#endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h new file mode 100644 index 000000000..2eae21930 --- /dev/null +++ b/arch/loongarch/include/asm/time.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TIME_H +#define _ASM_TIME_H + +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <asm/loongarch.h> + +extern u64 cpu_clock_freq; +extern u64 const_clock_freq; + +extern void sync_counter(void); + +static inline unsigned int calc_const_freq(void) +{ + unsigned int res; + unsigned int base_freq; + unsigned int cfm, cfd; + + res = read_cpucfg(LOONGARCH_CPUCFG2); + if (!(res & CPUCFG2_LLFTP)) + return 0; + + base_freq = read_cpucfg(LOONGARCH_CPUCFG4); + res = read_cpucfg(LOONGARCH_CPUCFG5); + cfm = res & 0xffff; + cfd = (res >> 16) & 0xffff; + + if (!base_freq || !cfm || !cfd) + return 0; + + return (base_freq * cfm / cfd); +} + +/* + * Initialize the calling CPU's timer interrupt as clockevent device + */ +extern 
int constant_clockevent_init(void); +extern int constant_clocksource_init(void); + +static inline void clockevent_set_clock(struct clock_event_device *cd, + unsigned int clock) +{ + clockevents_calc_mult_shift(cd, clock, 4); +} + +#endif /* _ASM_TIME_H */ diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h new file mode 100644 index 000000000..fb41e9e7a --- /dev/null +++ b/arch/loongarch/include/asm/timex.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TIMEX_H +#define _ASM_TIMEX_H + +#ifdef __KERNEL__ + +#include <linux/compiler.h> + +#include <asm/cpu.h> +#include <asm/cpu-features.h> + +typedef unsigned long cycles_t; + +#define get_cycles get_cycles + +static inline cycles_t get_cycles(void) +{ + return drdtime(); +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_TIMEX_H */ diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h new file mode 100644 index 000000000..dd24f5898 --- /dev/null +++ b/arch/loongarch/include/asm/tlb.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TLB_H +#define __ASM_TLB_H + +#include <linux/mm_types.h> +#include <asm/cpu-features.h> +#include <asm/loongarch.h> + +/* + * TLB invalidate and flush operations + */ +static inline void tlbclr(void) +{ + __asm__ __volatile__("tlbclr"); +} + +static inline void tlbflush(void) +{ + __asm__ __volatile__("tlbflush"); +} + +/* + * TLB R/W operations. + */ +static inline void tlb_probe(void) +{ + __asm__ __volatile__("tlbsrch"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__("tlbrd"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__("tlbwr"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__("tlbfill"); +} + +enum invtlb_ops { + /* Invalidate all TLB entries */ + INVTLB_ALL = 0x0, + /* Invalidate current TLB entries */ + INVTLB_CURRENT_ALL = 0x1, + /* Invalidate all global=1 lines in current TLB */ + INVTLB_CURRENT_GTRUE = 0x2, + /* Invalidate all global=0 lines in current TLB */ + INVTLB_CURRENT_GFALSE = 0x3, + /* Invalidate global=0 and matched asid lines in current TLB */ + INVTLB_GFALSE_AND_ASID = 0x4, + /* Invalidate addr with global=0 and matched asid in current TLB */ + INVTLB_ADDR_GFALSE_AND_ASID = 0x5, + /* Invalidate addr with global=1 or matched asid in current TLB */ + INVTLB_ADDR_GTRUE_OR_ASID = 0x6, + /* Invalidate matched gid in guest TLB */ + INVGTLB_GID = 0x9, + /* Invalidate global=1, matched gid in guest TLB */ + INVGTLB_GID_GTRUE = 0xa, + /* Invalidate global=0, matched gid in guest TLB */ + INVGTLB_GID_GFALSE = 0xb, + /* Invalidate global=0, matched gid and asid in guest TLB */ + INVGTLB_GID_GFALSE_ASID = 0xc, + /* Invalidate global=0, matched gid, asid and addr in guest TLB */ + INVGTLB_GID_GFALSE_ASID_ADDR = 0xd, + /* Invalidate global=1, matched gid, asid and addr in guest TLB */ + INVGTLB_GID_GTRUE_ASID_ADDR = 0xe, + /* Invalidate all gid gva-->gpa guest TLB entries */ + INVGTLB_ALLGID_GVA_TO_GPA = 0x10, + /* Invalidate all gid gpa-->hpa TLB entries */ + INVTLB_ALLGID_GPA_TO_HPA = 0x11, + /* Invalidate all gid TLB entries, including gva-->gpa and gpa-->hpa */ + INVTLB_ALLGID = 0x12, + /* Invalidate matched gid gva-->gpa guest TLB entries */ + INVGTLB_GID_GVA_TO_GPA = 0x13, + /* Invalidate matched gid gpa-->hpa TLB entries */ + INVTLB_GID_GPA_TO_HPA = 0x14, + /* Invalidate matched gid TLB entries, including gva-->gpa and gpa-->hpa */ + INVTLB_GID_ALL = 0x15, + /* Invalidate matched gid and addr gpa-->hpa TLB entries 
*/ + INVTLB_GID_ADDR = 0x16, +}; + +/* + * invtlb instruction encoding, with op, info and addr packed as: + * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) | + * (addr << 10) | (info << 5) | op + * The fixed opcode bits are the 0x6498000 constant used below. + */ +static inline void invtlb(u32 op, u32 info, u64 addr) +{ + __asm__ __volatile__( + "parse_r addr,%0\n\t" + "parse_r info,%1\n\t" + ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t" + : + : "r"(addr), "r"(info), "i"(op) + : + ); +} + +static inline void invtlb_addr(u32 op, u32 info, u64 addr) +{ + __asm__ __volatile__( + "parse_r addr,%0\n\t" + ".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t" + : + : "r"(addr), "i"(op) + : + ); +} + +static inline void invtlb_info(u32 op, u32 info, u64 addr) +{ + __asm__ __volatile__( + "parse_r info,%0\n\t" + ".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t" + : + : "r"(info), "i"(op) + : + ); +} + +static inline void invtlb_all(u32 op, u32 info, u64 addr) +{ + __asm__ __volatile__( + ".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t" + : + : "i"(op) + : + ); +} + +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) + +static void tlb_flush(struct mmu_gather *tlb); + +#define tlb_flush tlb_flush +#include <asm-generic/tlb.h> + +static inline void tlb_flush(struct mmu_gather *tlb) +{ + struct vm_area_struct vma; + + vma.vm_mm = tlb->mm; + vma.vm_flags = 0; + if (tlb->fullmm) { + flush_tlb_mm(tlb->mm); + return; + } + + flush_tlb_range(&vma, tlb->start, tlb->end); +} + +extern void handle_tlb_load(void); +extern void handle_tlb_store(void); +extern void handle_tlb_modify(void); +extern void handle_tlb_refill(void); +extern void handle_tlb_protect(void); + +extern void dump_tlb_all(void); +extern void dump_tlb_regs(void); + +#endif /* __ASM_TLB_H */ diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h new file mode 100644 index 000000000..a0785e590 --- /dev/null +++ b/arch/loongarch/include/asm/tlbflush.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes' TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_user(void); +extern void local_flush_tlb_kernel(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#ifdef CONFIG_SMP + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long); +extern void flush_tlb_kernel_range(unsigned long, unsigned long); +extern void flush_tlb_page(struct vm_area_struct *, unsigned long); +extern void flush_tlb_one(unsigned long vaddr); + +#else /* CONFIG_SMP */ + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) +#define 
flush_tlb_kernel_range(vmaddr, end) local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_TLBFLUSH_H */ diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h new file mode 100644 index 000000000..66128dec0 --- /dev/null +++ b/arch/loongarch/include/asm/topology.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + +#include <linux/smp.h> + +#ifdef CONFIG_NUMA + +extern cpumask_t cpus_on_node[]; + +#define cpumask_of_node(node) (&cpus_on_node[node]) + +struct pci_bus; +extern int pcibus_to_node(struct pci_bus *); + +#define cpumask_of_pcibus(bus) (cpu_online_mask) + +extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; + +void numa_set_distance(int from, int to, int distance); + +#define node_distance(from, to) (node_distances[(from)][(to)]) + +#else +#define pcibus_to_node(bus) 0 +#endif + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data[cpu].package) +#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu]) +#endif + +#include <asm-generic/topology.h> + +static inline void arch_fix_phys_package_id(int num, u32 slot) { } +#endif /* __ASM_TOPOLOGY_H */ diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h new file mode 100644 index 000000000..baf15a0dc --- /dev/null +++ b/arch/loongarch/include/asm/types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H + +#include <asm-generic/int-ll64.h> +#include <uapi/asm/types.h> + +#ifdef __ASSEMBLY__ +#define _ULCAST_ +#define _U64CAST_ +#else +#define _ULCAST_ (unsigned long) +#define _U64CAST_ (u64) +#endif + +#endif /* _ASM_TYPES_H */ diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h new file mode 100644 index 000000000..a8ae2af40 --- /dev/null +++ b/arch/loongarch/include/asm/uaccess.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2014, Imagination Technologies Ltd. + */ +#ifndef _ASM_UACCESS_H +#define _ASM_UACCESS_H + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/extable.h> +#include <asm/pgtable.h> +#include <asm-generic/extable.h> +#include <asm-generic/access_ok.h> + +extern u64 __ua_limit; + +#define __UA_ADDR ".dword" +#define __UA_LA "la.abs" +#define __UA_LIMIT __ua_limit + +/* + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. 
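 * + * Illustrative use (assumed caller context, not part of this patch): + * + * u32 val; + * + * if (get_user(val, (u32 __user *)uaddr)) + * return -EFAULT;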
+ * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \ + ((x) = 0, -EFAULT); \ +}) + +/* + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \ +}) + +/* + * __get_user: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + \ + __chk_user_ptr(ptr); \ + __get_user_common((x), sizeof(*(ptr)), ptr); \ + __gu_err; \ +}) + +/* + * __put_user: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. 
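 * + * Illustrative use (assumed caller context, not part of this patch): + * + * if (!access_ok(uaddr, sizeof(u32))) + * return -EFAULT; + * if (__put_user(val, (u32 __user *)uaddr)) + * return -EFAULT;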
+ */ +#define __put_user(x, ptr) \ +({ \ + int __pu_err = 0; \ + __typeof__(*(ptr)) __pu_val; \ + \ + __pu_val = (x); \ + __chk_user_ptr(ptr); \ + __put_user_common(ptr, sizeof(*(ptr))); \ + __pu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_common(val, size, ptr) \ +do { \ + switch (size) { \ + case 1: __get_data_asm(val, "ld.b", ptr); break; \ + case 2: __get_data_asm(val, "ld.h", ptr); break; \ + case 4: __get_data_asm(val, "ld.w", ptr); break; \ + case 8: __get_data_asm(val, "ld.d", ptr); break; \ + default: BUILD_BUG(); break; \ + } \ +} while (0) + +#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) + +#define __get_data_asm(val, insn, ptr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: " insn " %1, %2 \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li.w %0, %3 \n" \ + " move %1, $zero \n" \ + " b 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 3b \n" \ + " .previous \n" \ + : "+r" (__gu_err), "=r" (__gu_tmp) \ + : "m" (__m(ptr)), "i" (-EFAULT)); \ + \ + (val) = (__typeof__(*(ptr))) __gu_tmp; \ +} + +#define __put_user_common(ptr, size) \ +do { \ + switch (size) { \ + case 1: __put_data_asm("st.b", ptr); break; \ + case 2: __put_data_asm("st.h", ptr); break; \ + case 4: __put_data_asm("st.w", ptr); break; \ + case 8: __put_data_asm("st.d", ptr); break; \ + default: BUILD_BUG(); break; \ + } \ +} while (0) + +#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) + +#define __put_data_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + "1: " insn " %z2, %1 # __put_user_asm\n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li.w %0, %3 \n" \ + " b 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 3b \n" \ + " .previous \n" \ + : "+r" (__pu_err), "=m" (__m(ptr)) \ + : "Jr" (__pu_val), "i" (-EFAULT)); \ +} + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __gu_err = 0; \ + \ + __get_kernel_common(*((type *)(dst)), sizeof(type), \ + (__force type *)(src)); \ + if (unlikely(__gu_err)) \ + goto err_label; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + type __pu_val; \ + int __pu_err = 0; \ + \ + __pu_val = *(__force type *)(src); \ + __put_kernel_common(((type *)(dst)), sizeof(type)); \ + if (unlikely(__pu_err)) \ + goto err_label; \ +} while (0) + +extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n); + +static inline unsigned long __must_check +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __copy_user(to, (__force const void *)from, n); +} + +static inline unsigned long __must_check +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __copy_user((__force void *)to, from, n); +} + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @addr: Destination address, in user space. + * @size: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. 
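 * + * Illustrative use of the checked clear_user() wrapper defined below + * (assumed caller context, not part of this patch): + * + * if (clear_user(ubuf, len)) + * return -EFAULT;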
+ */ +extern unsigned long __clear_user(void __user *addr, __kernel_size_t size); + +#define clear_user(addr, n) \ +({ \ + void __user *__cl_addr = (addr); \ + unsigned long __cl_size = (n); \ + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ + __cl_size = __clear_user(__cl_addr, __cl_size); \ + __cl_size; \ +}) + +extern long strncpy_from_user(char *to, const char __user *from, long n); +extern long strnlen_user(const char __user *str, long n); + +#endif /* _ASM_UACCESS_H */ diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h new file mode 100644 index 000000000..cfddb0116 --- /dev/null +++ b/arch/loongarch/include/asm/unistd.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h new file mode 100644 index 000000000..6af4718bd --- /dev/null +++ b/arch/loongarch/include/asm/unwind.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Most of these ideas come from x86. + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_UNWIND_H +#define _ASM_UNWIND_H + +#include <linux/sched.h> + +#include <asm/stacktrace.h> + +enum unwinder_type { + UNWINDER_GUESS, + UNWINDER_PROLOGUE, +}; + +struct unwind_state { + char type; /* UNWINDER_XXX */ + struct stack_info stack_info; + struct task_struct *task; + bool first, error; + unsigned long sp, pc, ra; +}; + +void unwind_start(struct unwind_state *state, + struct task_struct *task, struct pt_regs *regs); +bool unwind_next_frame(struct unwind_state *state); +unsigned long unwind_get_return_address(struct unwind_state *state); + +static inline bool unwind_done(struct unwind_state *state) +{ + return state->stack_info.type == STACK_TYPE_UNKNOWN; +} + +static inline bool unwind_error(struct unwind_state *state) +{ + return state->error; +} + +#endif /* _ASM_UNWIND_H */ diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h new file mode 100644 index 000000000..d3ba35eb2 --- /dev/null +++ b/arch/loongarch/include/asm/vdso.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <vdso/datapage.h> + +#include <asm/barrier.h> + +/* + * struct loongarch_vdso_info - Details of a VDSO image. + * @vdso: Pointer to VDSO image (page-aligned). + * @size: Size of the VDSO image (page-aligned). + * @offset_sigreturn: Offset of the rt_sigreturn() trampoline. + * @code_mapping: Special mapping structure for vdso code. + * @data_mapping: Special mapping structure for vdso data. + * + * This structure contains details of a VDSO image, including the image data + * and offsets of certain symbols required by the kernel. It is generated as + * part of the VDSO build process, aside from the mapping page array, which is + * populated at runtime. 
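 * + * Illustrative relationship (assumed, not stated in this hunk): the + * sigreturn trampoline handed to user space lives at + * + * (vdso code mapping base) + vdso_info.offset_sigreturn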
+ */ +struct loongarch_vdso_info { + void *vdso; + unsigned long size; + unsigned long offset_sigreturn; + struct vm_special_mapping code_mapping; + struct vm_special_mapping data_mapping; +}; + +extern struct loongarch_vdso_info vdso_info; + +#endif /* __ASM_VDSO_H */ diff --git a/arch/loongarch/include/asm/vdso/clocksource.h b/arch/loongarch/include/asm/vdso/clocksource.h new file mode 100644 index 000000000..13cd580d4 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_CPU + +#endif /* __ASM_VDSOCLOCKSOURCE_H */ diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000..7b2cd3764 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include <asm/unistd.h> +#include <asm/vdso/vdso.h> + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline long gettimeofday_fallback( + struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register struct timezone *tz asm("a1") = _tz; + register long nr asm("a7") = __NR_gettimeofday; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (tv), "r" (tz) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline long clock_gettime_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long nr asm("a7") = __NR_clock_gettime; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (clkid), "r" (ts) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline int clock_getres_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long nr asm("a7") = __NR_clock_getres; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (clkid), "r" (ts) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ + uint64_t count; + + __asm__ __volatile__( + " rdtime.d %0, $zero\n" + : "=r" (count)); + + return count; +} + +static inline bool loongarch_vdso_hres_capable(void) +{ + return true; +} +#define __arch_vdso_hres_capable loongarch_vdso_hres_capable + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return get_vdso_data(); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h new file mode 100644 index 000000000..ef5770b34 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/processor.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#define cpu_relax() barrier() + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h new file mode 100644 index 000000000..3b55d32a0 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/vdso.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASSEMBLY__ + +#include <asm/asm.h> +#include <asm/page.h> +#include <asm/vdso.h> + +struct vdso_pcpu_data { + u32 node; +} ____cacheline_aligned_in_smp; + +struct loongarch_vdso_data { + struct vdso_pcpu_data pdata[NR_CPUS]; + struct vdso_data data[CS_BASES]; /* Arch-independent data */ +}; + +#define VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data)) + +static inline unsigned long get_vdso_base(void) +{ + unsigned long addr; + + __asm__( + " la.pcrel %0, _start\n" + : "=r" (addr) + : + :); + + return addr; +} + +static inline const struct vdso_data *get_vdso_data(void) +{ + return (const struct vdso_data *)(get_vdso_base() + - VDSO_DATA_SIZE + SMP_CACHE_BYTES * NR_CPUS); +} + +#endif /* __ASSEMBLY__ */ diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000..5de615383 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/vsyscall.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <linux/timekeeper_internal.h> +#include <vdso/datapage.h> + +extern struct vdso_data *vdso_data; + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. 
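 * + * The generic timekeeping core reaches that page through the + * __arch_get_k_vdso_data() hook defined here and picked up by the + * asm-generic header included below.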
+ */ +static __always_inline +struct vdso_data *__loongarch_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/loongarch/include/asm/vermagic.h b/arch/loongarch/include/asm/vermagic.h new file mode 100644 index 000000000..8b47ccfe3 --- /dev/null +++ b/arch/loongarch/include/asm/vermagic.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_VERMAGIC_H +#define _ASM_VERMAGIC_H + +#define MODULE_PROC_FAMILY "LOONGARCH " + +#ifdef CONFIG_32BIT +#define MODULE_KERNEL_TYPE "32BIT " +#elif defined CONFIG_64BIT +#define MODULE_KERNEL_TYPE "64BIT " +#endif + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE + +#endif /* _ASM_VERMAGIC_H */ diff --git a/arch/loongarch/include/asm/vmalloc.h b/arch/loongarch/include/asm/vmalloc.h new file mode 100644 index 000000000..965a0d41a --- /dev/null +++ b/arch/loongarch/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_LOONGARCH_VMALLOC_H +#define _ASM_LOONGARCH_VMALLOC_H + +#endif /* _ASM_LOONGARCH_VMALLOC_H */ diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild new file mode 100644 index 000000000..4aa680ca2 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += kvm_para.h diff --git a/arch/loongarch/include/uapi/asm/auxvec.h b/arch/loongarch/include/uapi/asm/auxvec.h new file mode 100644 index 000000000..922d9e6b5 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/auxvec.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_AUXVEC_H +#define __ASM_AUXVEC_H + +/* Location of VDSO image. 
*/ +#define AT_SYSINFO_EHDR 33 + +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + +#endif /* __ASM_AUXVEC_H */ diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000..00b4ba1e5 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_LOONGARCH_BITSPERLONG_H +#define __ASM_LOONGARCH_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8) + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_LOONGARCH_BITSPERLONG_H */ diff --git a/arch/loongarch/include/uapi/asm/bpf_perf_event.h b/arch/loongarch/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000..eb6e2fd2a --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include <linux/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/uapi/asm/break.h b/arch/loongarch/include/uapi/asm/break.h new file mode 100644 index 000000000..bb9b82ba5 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/break.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __UAPI_ASM_BREAK_H +#define __UAPI_ASM_BREAK_H + +#define BRK_DEFAULT 0 /* Used as default */ +#define BRK_BUG 1 /* Used by BUG() */ +#define BRK_KDB 2 /* Used in KDB_ENTER() */ +#define BRK_MATHEMU 3 /* Used by FPU emulator */ +#define BRK_USERBP 4 /* User bp (used by debuggers) */ +#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */ +#define BRK_OVERFLOW 6 /* Overflow check */ +#define BRK_DIVZERO 7 /* Divide by zero check */ +#define BRK_RANGE 8 /* Range error check */ +#define BRK_MULOVFL 9 /* Multiply overflow */ +#define BRK_KPROBE_BP 10 /* Kprobe break */ +#define BRK_KPROBE_SSTEPBP 11 /* Kprobe single step break */ +#define BRK_UPROBE_BP 12 /* See <asm/uprobes.h> */ +#define BRK_UPROBE_XOLBP 13 /* See <asm/uprobes.h> */ + +#endif /* __UAPI_ASM_BREAK_H */ diff --git a/arch/loongarch/include/uapi/asm/byteorder.h b/arch/loongarch/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..b1722d890 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/byteorder.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BYTEORDER_H +#define _ASM_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* _ASM_BYTEORDER_H */ diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h new file mode 100644 index 000000000..8840b72fa --- /dev/null +++ b/arch/loongarch/include/uapi/asm/hwcap.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_HWCAP_H +#define _UAPI_ASM_HWCAP_H + +/* HWCAP flags */ +#define HWCAP_LOONGARCH_CPUCFG (1 << 0) +#define HWCAP_LOONGARCH_LAM (1 << 1) +#define HWCAP_LOONGARCH_UAL (1 << 2) +#define HWCAP_LOONGARCH_FPU (1 << 3) +#define HWCAP_LOONGARCH_LSX (1 << 4) +#define HWCAP_LOONGARCH_LASX (1 << 5) +#define HWCAP_LOONGARCH_CRC32 (1 << 6) +#define 
HWCAP_LOONGARCH_COMPLEX (1 << 7) +#define HWCAP_LOONGARCH_CRYPTO (1 << 8) +#define HWCAP_LOONGARCH_LVZ (1 << 9) +#define HWCAP_LOONGARCH_LBT_X86 (1 << 10) +#define HWCAP_LOONGARCH_LBT_ARM (1 << 11) +#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) + +#endif /* _UAPI_ASM_HWCAP_H */ diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000..29d69c00f --- /dev/null +++ b/arch/loongarch/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_LOONGARCH_PERF_REGS_H +#define _ASM_LOONGARCH_PERF_REGS_H + +enum perf_event_loongarch_regs { + PERF_REG_LOONGARCH_PC, + PERF_REG_LOONGARCH_R1, + PERF_REG_LOONGARCH_R2, + PERF_REG_LOONGARCH_R3, + PERF_REG_LOONGARCH_R4, + PERF_REG_LOONGARCH_R5, + PERF_REG_LOONGARCH_R6, + PERF_REG_LOONGARCH_R7, + PERF_REG_LOONGARCH_R8, + PERF_REG_LOONGARCH_R9, + PERF_REG_LOONGARCH_R10, + PERF_REG_LOONGARCH_R11, + PERF_REG_LOONGARCH_R12, + PERF_REG_LOONGARCH_R13, + PERF_REG_LOONGARCH_R14, + PERF_REG_LOONGARCH_R15, + PERF_REG_LOONGARCH_R16, + PERF_REG_LOONGARCH_R17, + PERF_REG_LOONGARCH_R18, + PERF_REG_LOONGARCH_R19, + PERF_REG_LOONGARCH_R20, + PERF_REG_LOONGARCH_R21, + PERF_REG_LOONGARCH_R22, + PERF_REG_LOONGARCH_R23, + PERF_REG_LOONGARCH_R24, + PERF_REG_LOONGARCH_R25, + PERF_REG_LOONGARCH_R26, + PERF_REG_LOONGARCH_R27, + PERF_REG_LOONGARCH_R28, + PERF_REG_LOONGARCH_R29, + PERF_REG_LOONGARCH_R30, + PERF_REG_LOONGARCH_R31, + PERF_REG_LOONGARCH_MAX, +}; +#endif /* _ASM_LOONGARCH_PERF_REGS_H */ diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..083193f4a --- /dev/null +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _UAPI_ASM_PTRACE_H +#define _UAPI_ASM_PTRACE_H + +#include <linux/types.h> + +#ifndef __KERNEL__ +#include <stdint.h> +#endif + +/* + * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs, + * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR. + */ +#define GPR_BASE 0 +#define GPR_NUM 32 +#define GPR_END (GPR_BASE + GPR_NUM - 1) +#define ARG0 (GPR_END + 1) +#define PC (GPR_END + 2) +#define BADVADDR (GPR_END + 3) + +#define NUM_FPU_REGS 32 + +struct user_pt_regs { + /* Main processor registers. */ + unsigned long regs[32]; + + /* Original syscall arg0. */ + unsigned long orig_a0; + + /* Special CSR registers. */ + unsigned long csr_era; + unsigned long csr_badv; + unsigned long reserved[10]; +} __attribute__((aligned(8))); + +struct user_fp_state { + uint64_t fpr[32]; + uint64_t fcc; + uint32_t fcsr; +}; + +#define PTRACE_SYSEMU 0x1f +#define PTRACE_SYSEMU_SINGLESTEP 0x20 + +#endif /* _UAPI_ASM_PTRACE_H */ diff --git a/arch/loongarch/include/uapi/asm/reg.h b/arch/loongarch/include/uapi/asm/reg.h new file mode 100644 index 000000000..90ad910c6 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/reg.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Various register offset definitions for debuggers, core file + * examiners and whatnot. 
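 * (The first 35 indices match struct user_pt_regs: 32 GPRs, orig_a0, + * csr_era and csr_badv; the remaining saved CSRs bring the total to + * 40 64-bit slots, hence the LOONGARCH_EF_SIZE of 320 bytes below.)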
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_REG_H +#define __UAPI_ASM_LOONGARCH_REG_H + +#define LOONGARCH_EF_R0 0 +#define LOONGARCH_EF_R1 1 +#define LOONGARCH_EF_R2 2 +#define LOONGARCH_EF_R3 3 +#define LOONGARCH_EF_R4 4 +#define LOONGARCH_EF_R5 5 +#define LOONGARCH_EF_R6 6 +#define LOONGARCH_EF_R7 7 +#define LOONGARCH_EF_R8 8 +#define LOONGARCH_EF_R9 9 +#define LOONGARCH_EF_R10 10 +#define LOONGARCH_EF_R11 11 +#define LOONGARCH_EF_R12 12 +#define LOONGARCH_EF_R13 13 +#define LOONGARCH_EF_R14 14 +#define LOONGARCH_EF_R15 15 +#define LOONGARCH_EF_R16 16 +#define LOONGARCH_EF_R17 17 +#define LOONGARCH_EF_R18 18 +#define LOONGARCH_EF_R19 19 +#define LOONGARCH_EF_R20 20 +#define LOONGARCH_EF_R21 21 +#define LOONGARCH_EF_R22 22 +#define LOONGARCH_EF_R23 23 +#define LOONGARCH_EF_R24 24 +#define LOONGARCH_EF_R25 25 +#define LOONGARCH_EF_R26 26 +#define LOONGARCH_EF_R27 27 +#define LOONGARCH_EF_R28 28 +#define LOONGARCH_EF_R29 29 +#define LOONGARCH_EF_R30 30 +#define LOONGARCH_EF_R31 31 + +/* + * Saved special registers + */ +#define LOONGARCH_EF_ORIG_A0 32 +#define LOONGARCH_EF_CSR_ERA 33 +#define LOONGARCH_EF_CSR_BADV 34 +#define LOONGARCH_EF_CSR_CRMD 35 +#define LOONGARCH_EF_CSR_PRMD 36 +#define LOONGARCH_EF_CSR_EUEN 37 +#define LOONGARCH_EF_CSR_ECFG 38 +#define LOONGARCH_EF_CSR_ESTAT 39 + +#define LOONGARCH_EF_SIZE 320 /* size in bytes */ + +#endif /* __UAPI_ASM_LOONGARCH_REG_H */ diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..52e49b8bf --- /dev/null +++ b/arch/loongarch/include/uapi/asm/sigcontext.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _UAPI_ASM_SIGCONTEXT_H +#define _UAPI_ASM_SIGCONTEXT_H + +#include <linux/types.h> +#include <linux/posix_types.h> + +/* FP context was used */ +#define SC_USED_FP (1 << 0) +/* Address error was due to memory load */ +#define SC_ADDRERR_RD (1 << 30) +/* Address error was due to memory store */ +#define SC_ADDRERR_WR (1 << 31) + +struct sigcontext { + __u64 sc_pc; + __u64 sc_regs[32]; + __u32 sc_flags; + __u64 sc_extcontext[0] __attribute__((__aligned__(16))); +}; + +#define CONTEXT_INFO_ALIGN 16 +struct sctx_info { + __u32 magic; + __u32 size; + __u64 padding; /* padding to 16 bytes */ +}; + +/* FPU context */ +#define FPU_CTX_MAGIC 0x46505501 +#define FPU_CTX_ALIGN 8 +struct fpu_context { + __u64 regs[32]; + __u64 fcc; + __u32 fcsr; +}; + +#endif /* _UAPI_ASM_SIGCONTEXT_H */ diff --git a/arch/loongarch/include/uapi/asm/signal.h b/arch/loongarch/include/uapi/asm/signal.h new file mode 100644 index 000000000..992d965aa --- /dev/null +++ b/arch/loongarch/include/uapi/asm/signal.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _UAPI_ASM_SIGNAL_H +#define _UAPI_ASM_SIGNAL_H + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#include <asm-generic/signal.h> + +#endif diff --git a/arch/loongarch/include/uapi/asm/ucontext.h b/arch/loongarch/include/uapi/asm/ucontext.h new file mode 100644 index 000000000..12577e22b --- /dev/null +++ b/arch/loongarch/include/uapi/asm/ucontext.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef 
__LOONGARCH_UAPI_ASM_UCONTEXT_H +#define __LOONGARCH_UAPI_ASM_UCONTEXT_H + +/** + * struct ucontext - user context structure + * @uc_flags: + * @uc_link: + * @uc_stack: + * @uc_mcontext: holds basic processor state + * @uc_sigmask: + * @uc_extcontext: holds extended processor state + */ +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + /* There's some padding here to allow sigset_t to be expanded in the + * future. Though this is unlikely, other architectures put uc_sigmask + * at the end of this structure and explicitly state it can be + * expanded, so we didn't want to box ourselves in here. */ + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; + /* We can't put uc_sigmask at the end of this structure because we need + * to be able to expand sigcontext in the future. For example, the + * vector ISA extension will almost certainly add ISA state. We want + * to ensure all user-visible ISA state can be saved and restored via a + * ucontext, so we're putting this at the end in order to allow for + * infinite extensibility. Since we know this will be extended and we + * assume sigset_t won't be extended an extreme amount, we're + * prioritizing this. */ + struct sigcontext uc_mcontext; +}; + +#endif /* __LOONGARCH_UAPI_ASM_UCONTEXT_H */ diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h new file mode 100644 index 000000000..fcb668984 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#include <asm-generic/unistd.h> diff --git a/arch/loongarch/kernel/.gitignore b/arch/loongarch/kernel/.gitignore new file mode 100644 index 000000000..bbb90f92d --- /dev/null +++ b/arch/loongarch/kernel/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vmlinux.lds diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile new file mode 100644 index 000000000..42be56427 --- /dev/null +++ b/arch/loongarch/kernel/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/LoongArch kernel. +# + +extra-y := vmlinux.lds + +obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ + traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ + elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o + +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_EFI) += efi.o + +obj-$(CONFIG_CPU_HAS_FPU) += fpu.o + +obj-$(CONFIG_MODULES) += module.o module-sections.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o + +obj-$(CONFIG_PROC_FS) += proc.o + +obj-$(CONFIG_SMP) += smp.o + +obj-$(CONFIG_NUMA) += numa.o + +obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o + +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + +obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o +obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o + +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o + +CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/loongarch/kernel/access-helper.h b/arch/loongarch/kernel/access-helper.h new file mode 100644 index 000000000..4a35ca81b --- /dev/null +++ b/arch/loongarch/kernel/access-helper.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/uaccess.h> + +static inline int __get_inst(u32 *i, u32 *p, bool user) +{ + return user ? 
get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p); +} + +static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) +{ + return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p); +} diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c new file mode 100644 index 000000000..8319cc409 --- /dev/null +++ b/arch/loongarch/kernel/acpi.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * acpi.c - Architecture-Specific Low-Level ACPI Boot Support + * + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/memblock.h> +#include <linux/serial_core.h> +#include <asm/io.h> +#include <asm/numa.h> +#include <asm/loongson.h> + +int acpi_disabled; +EXPORT_SYMBOL(acpi_disabled); +int acpi_noirq; +int acpi_pci_disabled; +EXPORT_SYMBOL(acpi_pci_disabled); +int acpi_strict = 1; /* We have no workarounds on LoongArch */ +int num_processors; +int disabled_cpus; + +u64 acpi_saved_sp; + +#define MAX_CORE_PIC 256 + +#define PREFIX "ACPI: " + +void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size) +{ + + if (!phys || !size) + return NULL; + + return early_memremap(phys, size); +} +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) +{ + if (!map || !size) + return; + + early_memunmap(map, size); +} + +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +{ + if (!memblock_is_memory(phys)) + return ioremap(phys, size); + else + return ioremap_cache(phys, size); +} + +#ifdef CONFIG_SMP +static int set_processor_mask(u32 id, u32 flags) +{ + + int cpu, cpuid = id; + + if (num_processors >= nr_cpu_ids) { + pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached." 
+ " processor 0x%x ignored.\n", nr_cpu_ids, cpuid); + + return -ENODEV; + + } + if (cpuid == loongson_sysconf.boot_cpu_id) + cpu = 0; + else + cpu = cpumask_next_zero(-1, cpu_present_mask); + + if (flags & ACPI_MADT_ENABLED) { + num_processors++; + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + __cpu_number_map[cpuid] = cpu; + __cpu_logical_map[cpu] = cpuid; + } else + disabled_cpus++; + + return cpu; +} +#endif + +static int __init +acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_core_pic *processor = NULL; + + processor = (struct acpi_madt_core_pic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); +#ifdef CONFIG_SMP + set_processor_mask(processor->core_id, processor->flags); +#endif + + return 0; +} + +static int __init +acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end) +{ + static int core = 0; + struct acpi_madt_eio_pic *eiointc = NULL; + + eiointc = (struct acpi_madt_eio_pic *)header; + if (BAD_MADT_ENTRY(eiointc, end)) + return -EINVAL; + + core = eiointc->node * CORES_PER_EIO_NODE; + set_bit(core, &(loongson_sysconf.cores_io_master)); + + return 0; +} + +static void __init acpi_process_madt(void) +{ +#ifdef CONFIG_SMP + int i; + + for (i = 0; i < NR_CPUS; i++) { + __cpu_number_map[i] = -1; + __cpu_logical_map[i] = -1; + } +#endif + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, + acpi_parse_processor, MAX_CORE_PIC); + + acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, + acpi_parse_eio_master, MAX_IO_PICS); + + loongson_sysconf.nr_cpus = num_processors; +} + +void __init acpi_boot_table_init(void) +{ + /* + * If acpi_disabled, bail out + */ + if (acpi_disabled) + return; + + /* + * Initialize the ACPI boot-time table parser. + */ + if (acpi_table_init()) { + disable_acpi(); + return; + } + + loongson_sysconf.boot_cpu_id = read_csr_cpuid(); + + /* + * Process the Multiple APIC Description Table (MADT), if present + */ + acpi_process_madt(); + + /* Do not enable ACPI SPCR console by default */ + acpi_parse_spcr(earlycon_acpi_spcr_enable, false); +} + +#ifdef CONFIG_ACPI_NUMA + +static __init int setup_node(int pxm) +{ + return acpi_map_pxm_to_node(pxm); +} + +/* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. I/O localities are + * not supported at this point. 
+ */ +unsigned int numa_distance_cnt; + +static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit) +{ + return slit->locality_count; +} + +void __init numa_set_distance(int from, int to, int distance) +{ + if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + node_distances[from][to] = distance; +} + +/* Callback for Proximity Domain -> CPUID mapping */ +void __init +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) +{ + int pxm, node; + + if (srat_disabled()) + return; + if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { + bad_srat(); + return; + } + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) + return; + pxm = pa->proximity_domain_lo; + if (acpi_srat_revision >= 2) { + pxm |= (pa->proximity_domain_hi[0] << 8); + pxm |= (pa->proximity_domain_hi[1] << 16); + pxm |= (pa->proximity_domain_hi[2] << 24); + } + node = setup_node(pxm); + if (node < 0) { + pr_err("SRAT: Too many proximity domains %x\n", pxm); + bad_srat(); + return; + } + + if (pa->apic_id >= CONFIG_NR_CPUS) { + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", + pxm, pa->apic_id, node); + return; + } + + early_numa_add_cpu(pa->apic_id, node); + + set_cpuid_to_node(pa->apic_id, node); + node_set(node, numa_nodes_parsed); + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node); +} + +void __init acpi_numa_arch_fixup(void) {} +#endif + +void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) +{ + memblock_reserve(addr, size); +} + +#ifdef CONFIG_ACPI_HOTPLUG_CPU + +#include <acpi/processor.h> + +static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +{ +#ifdef CONFIG_ACPI_NUMA + int nid; + + nid = acpi_get_node(handle); + if (nid != NUMA_NO_NODE) { + set_cpuid_to_node(physid, nid); + node_set(nid, numa_nodes_parsed); + set_cpu_numa_node(cpu, nid); + cpumask_set_cpu(cpu, cpumask_of_node(nid)); + } +#endif + return 0; +} + +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu) +{ + int cpu; + + cpu = set_processor_mask(physid, ACPI_MADT_ENABLED); + if (cpu < 0) { + pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); + return cpu; + } + + acpi_map_cpu2node(handle, cpu, physid); + + *pcpu = cpu; + + return 0; +} +EXPORT_SYMBOL(acpi_map_cpu); + +int acpi_unmap_cpu(int cpu) +{ +#ifdef CONFIG_ACPI_NUMA + set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE); +#endif + set_cpu_present(cpu, false); + num_processors--; + + pr_info("cpu%d hot remove!\n", cpu); + + return 0; +} +EXPORT_SYMBOL(acpi_unmap_cpu); + +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c new file mode 100644 index 000000000..bdd88eda9 --- /dev/null +++ b/arch/loongarch/kernel/asm-offsets.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * asm-offsets.c: Calculate pt_regs and task_struct offsets. 
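+ * The constants emitted here end up in the generated asm-offsets.h and
+ * are consumed by the assembly sources (entry.S, head.S, fpu.S, ...).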
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/kbuild.h> +#include <linux/suspend.h> +#include <asm/cpu-info.h> +#include <asm/ptrace.h> +#include <asm/processor.h> + +void output_ptreg_defines(void) +{ + COMMENT("LoongArch pt_regs offsets."); + OFFSET(PT_R0, pt_regs, regs[0]); + OFFSET(PT_R1, pt_regs, regs[1]); + OFFSET(PT_R2, pt_regs, regs[2]); + OFFSET(PT_R3, pt_regs, regs[3]); + OFFSET(PT_R4, pt_regs, regs[4]); + OFFSET(PT_R5, pt_regs, regs[5]); + OFFSET(PT_R6, pt_regs, regs[6]); + OFFSET(PT_R7, pt_regs, regs[7]); + OFFSET(PT_R8, pt_regs, regs[8]); + OFFSET(PT_R9, pt_regs, regs[9]); + OFFSET(PT_R10, pt_regs, regs[10]); + OFFSET(PT_R11, pt_regs, regs[11]); + OFFSET(PT_R12, pt_regs, regs[12]); + OFFSET(PT_R13, pt_regs, regs[13]); + OFFSET(PT_R14, pt_regs, regs[14]); + OFFSET(PT_R15, pt_regs, regs[15]); + OFFSET(PT_R16, pt_regs, regs[16]); + OFFSET(PT_R17, pt_regs, regs[17]); + OFFSET(PT_R18, pt_regs, regs[18]); + OFFSET(PT_R19, pt_regs, regs[19]); + OFFSET(PT_R20, pt_regs, regs[20]); + OFFSET(PT_R21, pt_regs, regs[21]); + OFFSET(PT_R22, pt_regs, regs[22]); + OFFSET(PT_R23, pt_regs, regs[23]); + OFFSET(PT_R24, pt_regs, regs[24]); + OFFSET(PT_R25, pt_regs, regs[25]); + OFFSET(PT_R26, pt_regs, regs[26]); + OFFSET(PT_R27, pt_regs, regs[27]); + OFFSET(PT_R28, pt_regs, regs[28]); + OFFSET(PT_R29, pt_regs, regs[29]); + OFFSET(PT_R30, pt_regs, regs[30]); + OFFSET(PT_R31, pt_regs, regs[31]); + OFFSET(PT_CRMD, pt_regs, csr_crmd); + OFFSET(PT_PRMD, pt_regs, csr_prmd); + OFFSET(PT_EUEN, pt_regs, csr_euen); + OFFSET(PT_ECFG, pt_regs, csr_ecfg); + OFFSET(PT_ESTAT, pt_regs, csr_estat); + OFFSET(PT_ERA, pt_regs, csr_era); + OFFSET(PT_BVADDR, pt_regs, csr_badvaddr); + OFFSET(PT_ORIG_A0, pt_regs, orig_a0); + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + BLANK(); +} + +void output_task_defines(void) +{ + COMMENT("LoongArch task_struct offsets."); + OFFSET(TASK_STATE, task_struct, __state); + OFFSET(TASK_THREAD_INFO, task_struct, stack); + OFFSET(TASK_FLAGS, task_struct, flags); + OFFSET(TASK_MM, task_struct, mm); + OFFSET(TASK_PID, task_struct, pid); + DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); + BLANK(); +} + +void output_thread_info_defines(void) +{ + COMMENT("LoongArch thread_info offsets."); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_TP_VALUE, thread_info, tp_value); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_REGS, thread_info, regs); + DEFINE(_THREAD_SIZE, THREAD_SIZE); + DEFINE(_THREAD_MASK, THREAD_MASK); + DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); + DEFINE(_IRQ_STACK_START, IRQ_STACK_START); + BLANK(); +} + +void output_thread_defines(void) +{ + COMMENT("LoongArch specific thread_struct offsets."); + OFFSET(THREAD_REG01, task_struct, thread.reg01); + OFFSET(THREAD_REG03, task_struct, thread.reg03); + OFFSET(THREAD_REG22, task_struct, thread.reg22); + OFFSET(THREAD_REG23, task_struct, thread.reg23); + OFFSET(THREAD_REG24, task_struct, thread.reg24); + OFFSET(THREAD_REG25, task_struct, thread.reg25); + OFFSET(THREAD_REG26, task_struct, thread.reg26); + OFFSET(THREAD_REG27, task_struct, thread.reg27); + OFFSET(THREAD_REG28, task_struct, thread.reg28); + OFFSET(THREAD_REG29, task_struct, thread.reg29); + OFFSET(THREAD_REG30, task_struct, thread.reg30); + OFFSET(THREAD_REG31, task_struct, thread.reg31); + OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra); + 
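/* sched_ra/sched_cfa are saved at context switch so the stack unwinder can walk sleeping tasks */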
OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa); + OFFSET(THREAD_CSRCRMD, task_struct, + thread.csr_crmd); + OFFSET(THREAD_CSRPRMD, task_struct, + thread.csr_prmd); + OFFSET(THREAD_CSREUEN, task_struct, + thread.csr_euen); + OFFSET(THREAD_CSRECFG, task_struct, + thread.csr_ecfg); + + OFFSET(THREAD_SCR0, task_struct, thread.scr0); + OFFSET(THREAD_SCR1, task_struct, thread.scr1); + OFFSET(THREAD_SCR2, task_struct, thread.scr2); + OFFSET(THREAD_SCR3, task_struct, thread.scr3); + + OFFSET(THREAD_EFLAGS, task_struct, thread.eflags); + + OFFSET(THREAD_FPU, task_struct, thread.fpu); + + OFFSET(THREAD_BVADDR, task_struct, \ + thread.csr_badvaddr); + OFFSET(THREAD_ECODE, task_struct, \ + thread.error_code); + OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr); + BLANK(); +} + +void output_thread_fpu_defines(void) +{ + OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]); + OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]); + OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]); + OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]); + OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]); + OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]); + OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]); + OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]); + OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]); + OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]); + OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]); + OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]); + OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]); + OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]); + OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]); + OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]); + OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]); + OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]); + OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]); + OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]); + OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]); + OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]); + OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]); + OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]); + OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]); + OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]); + OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]); + OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]); + OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]); + OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]); + OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]); + OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]); + + OFFSET(THREAD_FCSR, loongarch_fpu, fcsr); + OFFSET(THREAD_FCC, loongarch_fpu, fcc); + BLANK(); +} + +void output_mm_defines(void) +{ + COMMENT("Size of struct page"); + DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); + BLANK(); + COMMENT("Linux mm_struct offsets."); + OFFSET(MM_USERS, mm_struct, mm_users); + OFFSET(MM_PGD, mm_struct, pgd); + OFFSET(MM_CONTEXT, mm_struct, context); + BLANK(); + DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); + DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); + DEFINE(_PTE_T_SIZE, sizeof(pte_t)); + BLANK(); + DEFINE(_PGD_T_LOG2, PGD_T_LOG2); +#ifndef __PAGETABLE_PMD_FOLDED + DEFINE(_PMD_T_LOG2, PMD_T_LOG2); +#endif + DEFINE(_PTE_T_LOG2, PTE_T_LOG2); + BLANK(); + DEFINE(_PMD_SHIFT, PMD_SHIFT); + DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); + BLANK(); + DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); + DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); + BLANK(); + DEFINE(_PAGE_SHIFT, PAGE_SHIFT); + DEFINE(_PAGE_SIZE, PAGE_SIZE); + BLANK(); +} + +void output_sc_defines(void) +{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + OFFSET(SC_PC, sigcontext, sc_pc); + BLANK(); +} + +void output_signal_defines(void) +{ + COMMENT("Linux signal numbers."); + 
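/* exported so that assembly code can refer to signal numbers symbolically */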
DEFINE(_SIGHUP, SIGHUP); + DEFINE(_SIGINT, SIGINT); + DEFINE(_SIGQUIT, SIGQUIT); + DEFINE(_SIGILL, SIGILL); + DEFINE(_SIGTRAP, SIGTRAP); + DEFINE(_SIGIOT, SIGIOT); + DEFINE(_SIGABRT, SIGABRT); + DEFINE(_SIGFPE, SIGFPE); + DEFINE(_SIGKILL, SIGKILL); + DEFINE(_SIGBUS, SIGBUS); + DEFINE(_SIGSEGV, SIGSEGV); + DEFINE(_SIGSYS, SIGSYS); + DEFINE(_SIGPIPE, SIGPIPE); + DEFINE(_SIGALRM, SIGALRM); + DEFINE(_SIGTERM, SIGTERM); + DEFINE(_SIGUSR1, SIGUSR1); + DEFINE(_SIGUSR2, SIGUSR2); + DEFINE(_SIGCHLD, SIGCHLD); + DEFINE(_SIGPWR, SIGPWR); + DEFINE(_SIGWINCH, SIGWINCH); + DEFINE(_SIGURG, SIGURG); + DEFINE(_SIGIO, SIGIO); + DEFINE(_SIGSTOP, SIGSTOP); + DEFINE(_SIGTSTP, SIGTSTP); + DEFINE(_SIGCONT, SIGCONT); + DEFINE(_SIGTTIN, SIGTTIN); + DEFINE(_SIGTTOU, SIGTTOU); + DEFINE(_SIGVTALRM, SIGVTALRM); + DEFINE(_SIGPROF, SIGPROF); + DEFINE(_SIGXCPU, SIGXCPU); + DEFINE(_SIGXFSZ, SIGXFSZ); + BLANK(); +} + +#ifdef CONFIG_SMP +void output_smpboot_defines(void) +{ + COMMENT("Linux smp cpu boot offsets."); + OFFSET(CPU_BOOT_STACK, secondary_data, stack); + OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info); + BLANK(); +} +#endif diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c new file mode 100644 index 000000000..c7988f757 --- /dev/null +++ b/arch/loongarch/kernel/cacheinfo.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LoongArch cacheinfo support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/cacheinfo.h> +#include <linux/topology.h> +#include <asm/bootinfo.h> +#include <asm/cpu-info.h> + +int init_cache_level(unsigned int cpu) +{ + int cache_present = current_cpu_data.cache_leaves_present; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + + this_cpu_ci->num_levels = + current_cpu_data.cache_leaves[cache_present - 1].level; + this_cpu_ci->num_leaves = cache_present; + + return 0; +} + +static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, + struct cacheinfo *sib_leaf) +{ + return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) + && !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE)); +} + +static void cache_cpumap_setup(unsigned int cpu) +{ + unsigned int index; + struct cacheinfo *this_leaf, *sib_leaf; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + + for (index = 0; index < this_cpu_ci->num_leaves; index++) { + unsigned int i; + + this_leaf = this_cpu_ci->info_list + index; + /* skip if shared_cpu_map is already populated */ + if (!cpumask_empty(&this_leaf->shared_cpu_map)) + continue; + + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + for_each_online_cpu(i) { + struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); + + if (i == cpu || !sib_cpu_ci->info_list || + (cpu_to_node(i) != cpu_to_node(cpu))) + continue; + + sib_leaf = sib_cpu_ci->info_list + index; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { + cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); + cpumask_set_cpu(i, &this_leaf->shared_cpu_map); + } + } + } +} + +int populate_cache_leaves(unsigned int cpu) +{ + int i, cache_present = current_cpu_data.cache_leaves_present; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves; + + for (i = 0; i < cache_present; i++) { + cd = cdesc + i; + + this_leaf->type = cd->type; + this_leaf->level = cd->level; + this_leaf->coherency_line_size = cd->linesz; + this_leaf->number_of_sets = cd->sets; + 
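/* total cache size follows from the geometry, e.g. 64 B line * 256 sets * 16 ways = 256 KiB */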
this_leaf->ways_of_associativity = cd->ways; + this_leaf->size = cd->linesz * cd->sets * cd->ways; + this_leaf->priv = &cd->flags; + this_leaf++; + } + + cache_cpumap_setup(cpu); + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c new file mode 100644 index 000000000..5adf0f736 --- /dev/null +++ b/arch/loongarch/kernel/cpu-probe.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Processor capabilities determination functions. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/smp.h> +#include <linux/stddef.h> +#include <linux/export.h> +#include <linux/printk.h> +#include <linux/uaccess.h> + +#include <asm/cpu-features.h> +#include <asm/elf.h> +#include <asm/fpu.h> +#include <asm/loongarch.h> +#include <asm/pgtable-bits.h> +#include <asm/setup.h> + +/* Hardware capabilities */ +unsigned int elf_hwcap __read_mostly; +EXPORT_SYMBOL_GPL(elf_hwcap); + +/* + * Determine the FCSR mask for FPU hardware. + */ +static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c) +{ + unsigned long sr, mask, fcsr, fcsr0, fcsr1; + + fcsr = c->fpu_csr0; + mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; + + sr = read_csr_euen(); + enable_fpu(); + + fcsr0 = fcsr & mask; + write_fcsr(LOONGARCH_FCSR0, fcsr0); + fcsr0 = read_fcsr(LOONGARCH_FCSR0); + + fcsr1 = fcsr | ~mask; + write_fcsr(LOONGARCH_FCSR0, fcsr1); + fcsr1 = read_fcsr(LOONGARCH_FCSR0); + + write_fcsr(LOONGARCH_FCSR0, fcsr); + + write_csr_euen(sr); + + c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask; +} + +static inline void set_elf_platform(int cpu, const char *plat) +{ + if (cpu == 0) + __elf_platform = plat; +} + +/* MAP BASE */ +unsigned long vm_map_base; +EXPORT_SYMBOL(vm_map_base); + +static void cpu_probe_addrbits(struct cpuinfo_loongarch *c) +{ +#ifdef __NEED_ADDRBITS_PROBE + c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4; + c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12; + vm_map_base = 0UL - (1UL << c->vabits); +#endif +} + +static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa) +{ + switch (isa) { + case LOONGARCH_CPU_ISA_LA64: + c->isa_level |= LOONGARCH_CPU_ISA_LA64; + fallthrough; + case LOONGARCH_CPU_ISA_LA32S: + c->isa_level |= LOONGARCH_CPU_ISA_LA32S; + fallthrough; + case LOONGARCH_CPU_ISA_LA32R: + c->isa_level |= LOONGARCH_CPU_ISA_LA32R; + break; + } +} + +static void cpu_probe_common(struct cpuinfo_loongarch *c) +{ + unsigned int config; + unsigned long asid_mask; + + c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | + LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; + + elf_hwcap = HWCAP_LOONGARCH_CPUCFG; + + config = read_cpucfg(LOONGARCH_CPUCFG1); + if (config & CPUCFG1_UAL) { + c->options |= LOONGARCH_CPU_UAL; + elf_hwcap |= HWCAP_LOONGARCH_UAL; + } + if (config & CPUCFG1_CRC32) { + c->options |= LOONGARCH_CPU_CRC32; + elf_hwcap |= HWCAP_LOONGARCH_CRC32; + } + + + config = read_cpucfg(LOONGARCH_CPUCFG2); + if (config & CPUCFG2_LAM) { + c->options |= LOONGARCH_CPU_LAM; + elf_hwcap |= HWCAP_LOONGARCH_LAM; + } + if (config & CPUCFG2_FP) { + c->options |= LOONGARCH_CPU_FPU; + elf_hwcap |= HWCAP_LOONGARCH_FPU; + } + if (config & CPUCFG2_COMPLEX) { + c->options |= LOONGARCH_CPU_COMPLEX; + elf_hwcap |= HWCAP_LOONGARCH_COMPLEX; + } + if (config & CPUCFG2_CRYPTO) { + c->options |= LOONGARCH_CPU_CRYPTO; + elf_hwcap |= 
HWCAP_LOONGARCH_CRYPTO; + } + if (config & CPUCFG2_LVZP) { + c->options |= LOONGARCH_CPU_LVZ; + elf_hwcap |= HWCAP_LOONGARCH_LVZ; + } + + config = read_cpucfg(LOONGARCH_CPUCFG6); + if (config & CPUCFG6_PMP) + c->options |= LOONGARCH_CPU_PMP; + + config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); + if (config & IOCSRF_CSRIPI) + c->options |= LOONGARCH_CPU_CSRIPI; + if (config & IOCSRF_EXTIOI) + c->options |= LOONGARCH_CPU_EXTIOI; + if (config & IOCSRF_FREQSCALE) + c->options |= LOONGARCH_CPU_SCALEFREQ; + if (config & IOCSRF_FLATMODE) + c->options |= LOONGARCH_CPU_FLATMODE; + if (config & IOCSRF_EIODECODE) + c->options |= LOONGARCH_CPU_EIODECODE; + if (config & IOCSRF_VM) + c->options |= LOONGARCH_CPU_HYPERVISOR; + + config = csr_read32(LOONGARCH_CSR_ASID); + config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT; + asid_mask = GENMASK(config - 1, 0); + set_cpu_asid_mask(c, asid_mask); + + config = read_csr_prcfg1(); + c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0); + c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK); + + config = read_csr_prcfg3(); + switch (config & CSR_CONF3_TLBTYPE) { + case 0: + c->tlbsizemtlb = 0; + c->tlbsizestlbsets = 0; + c->tlbsizestlbways = 0; + c->tlbsize = 0; + break; + case 1: + c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1; + c->tlbsizestlbsets = 0; + c->tlbsizestlbways = 0; + c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways; + break; + case 2: + c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1; + c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT); + c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1; + c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways; + break; + default: + pr_warn("Warning: unknown TLB type\n"); + } +} + +#define MAX_NAME_LEN 32 +#define VENDOR_OFFSET 0 +#define CPUNAME_OFFSET 9 + +static char cpu_full_name[MAX_NAME_LEN] = " - "; + +static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu) +{ + uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]); + uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]); + + if (!__cpu_full_name[cpu]) + __cpu_full_name[cpu] = cpu_full_name; + + *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); + *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); + + switch (c->processor_id & PRID_SERIES_MASK) { + case PRID_SERIES_LA132: + c->cputype = CPU_LOONGSON32; + set_isa(c, LOONGARCH_CPU_ISA_LA32S); + __cpu_family[cpu] = "Loongson-32bit"; + pr_info("32-bit Loongson Processor probed (LA132 Core)\n"); + break; + case PRID_SERIES_LA264: + c->cputype = CPU_LOONGSON64; + set_isa(c, LOONGARCH_CPU_ISA_LA64); + __cpu_family[cpu] = "Loongson-64bit"; + pr_info("64-bit Loongson Processor probed (LA264 Core)\n"); + break; + case PRID_SERIES_LA364: + c->cputype = CPU_LOONGSON64; + set_isa(c, LOONGARCH_CPU_ISA_LA64); + __cpu_family[cpu] = "Loongson-64bit"; + pr_info("64-bit Loongson Processor probed (LA364 Core)\n"); + break; + case PRID_SERIES_LA464: + c->cputype = CPU_LOONGSON64; + set_isa(c, LOONGARCH_CPU_ISA_LA64); + __cpu_family[cpu] = "Loongson-64bit"; + pr_info("64-bit Loongson Processor probed (LA464 Core)\n"); + break; + case PRID_SERIES_LA664: + c->cputype = CPU_LOONGSON64; + set_isa(c, LOONGARCH_CPU_ISA_LA64); + __cpu_family[cpu] = "Loongson-64bit"; + pr_info("64-bit Loongson Processor probed (LA664 Core)\n"); + break; + default: /* Default to 64 bit */ + c->cputype = CPU_LOONGSON64; 
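+ /* unknown PRID series: treat it as a generic 64-bit core so boot can proceed */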
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_family[NR_CPUS];
+const char *__cpu_full_name[NR_CPUS];
+const char *__elf_platform;
+
+static void cpu_report(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_family_string());
+ if (c->options & LOONGARCH_CPU_FPU)
+ pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
+}
+
+void cpu_probe(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ /*
+ * Set a default ELF platform, cpu probe may later
+ * overwrite it with a more precise value
+ */
+ set_elf_platform(cpu, "loongarch");
+
+ c->cputype = CPU_UNKNOWN;
+ c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
+ c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
+
+ c->fpu_csr0 = FPU_CSR_RN;
+ c->fpu_mask = FPU_CSR_RSVD;
+
+ cpu_probe_common(c);
+
+ per_cpu_trap_init(cpu);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_family[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ cpu_probe_addrbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vabits) - 1);
+#endif
+
+ cpu_report();
+} diff --git a/arch/loongarch/kernel/crash_dump.c b/arch/loongarch/kernel/crash_dump.c new file mode 100644 index 000000000..e559307c1 --- /dev/null +++ b/arch/loongarch/kernel/crash_dump.c @@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ memunmap(vaddr);
+
+ return csize;
+} diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c new file mode 100644 index 000000000..7a9c6a9dd --- /dev/null +++ b/arch/loongarch/kernel/dma.c @@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/dma-direct.h>
+
+void acpi_arch_dma_setup(struct device *dev)
+{
+ int ret;
+ u64 mask, end = 0;
+ const struct bus_dma_region *map = NULL;
+
+ ret = acpi_dma_get_range(dev, &map);
+ if (!ret && map) {
+ const struct bus_dma_region *r = map;
+
+ for (end = 0; r->size; r++) {
+ if (r->dma_start + r->size - 1 > end)
+ end = r->dma_start + r->size - 1;
+ }
+
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
+ }
+
+} diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S new file mode 100644 index 000000000..8c1d229a2 --- /dev/null +++ b/arch/loongarch/kernel/efi-header.S @@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+.Lcoff_header:
+ .short 
IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ + .short .Lsection_count /* NumberOfSections */ + .long 0 /* TimeDateStamp */ + .long 0 /* PointerToSymbolTable */ + .long 0 /* NumberOfSymbols */ + .short .Lsection_table - .Loptional_header /* SizeOfOptionalHeader */ + .short IMAGE_FILE_DEBUG_STRIPPED | \ + IMAGE_FILE_EXECUTABLE_IMAGE | \ + IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */ + +.Loptional_header: + .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */ + .byte 0x02 /* MajorLinkerVersion */ + .byte 0x14 /* MinorLinkerVersion */ + .long __inittext_end - .Lefi_header_end /* SizeOfCode */ + .long _end - __initdata_begin /* SizeOfInitializedData */ + .long 0 /* SizeOfUninitializedData */ + .long __efistub_efi_pe_entry - _head /* AddressOfEntryPoint */ + .long .Lefi_header_end - _head /* BaseOfCode */ + +.Lextra_header_fields: + .quad 0 /* ImageBase */ + .long PECOFF_SEGMENT_ALIGN /* SectionAlignment */ + .long PECOFF_FILE_ALIGN /* FileAlignment */ + .short 0 /* MajorOperatingSystemVersion */ + .short 0 /* MinorOperatingSystemVersion */ + .short LINUX_EFISTUB_MAJOR_VERSION /* MajorImageVersion */ + .short LINUX_EFISTUB_MINOR_VERSION /* MinorImageVersion */ + .short 0 /* MajorSubsystemVersion */ + .short 0 /* MinorSubsystemVersion */ + .long 0 /* Win32VersionValue */ + + .long _end - _head /* SizeOfImage */ + + /* Everything before the kernel image is considered part of the header */ + .long .Lefi_header_end - _head /* SizeOfHeaders */ + .long 0 /* CheckSum */ + .short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */ + .short 0 /* DllCharacteristics */ + .quad 0 /* SizeOfStackReserve */ + .quad 0 /* SizeOfStackCommit */ + .quad 0 /* SizeOfHeapReserve */ + .quad 0 /* SizeOfHeapCommit */ + .long 0 /* LoaderFlags */ + .long (.Lsection_table - .) / 8 /* NumberOfRvaAndSizes */ + + .quad 0 /* ExportTable */ + .quad 0 /* ImportTable */ + .quad 0 /* ResourceTable */ + .quad 0 /* ExceptionTable */ + .quad 0 /* CertificationTable */ + .quad 0 /* BaseRelocationTable */ + + /* Section table */ +.Lsection_table: + .ascii ".text\0\0\0" + .long __inittext_end - .Lefi_header_end /* VirtualSize */ + .long .Lefi_header_end - _head /* VirtualAddress */ + .long __inittext_end - .Lefi_header_end /* SizeOfRawData */ + .long .Lefi_header_end - _head /* PointerToRawData */ + + .long 0 /* PointerToRelocations */ + .long 0 /* PointerToLineNumbers */ + .short 0 /* NumberOfRelocations */ + .short 0 /* NumberOfLineNumbers */ + .long IMAGE_SCN_CNT_CODE | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_EXECUTE /* Characteristics */ + + .ascii ".data\0\0\0" + .long _end - __initdata_begin /* VirtualSize */ + .long __initdata_begin - _head /* VirtualAddress */ + .long _edata - __initdata_begin /* SizeOfRawData */ + .long __initdata_begin - _head /* PointerToRawData */ + + .long 0 /* PointerToRelocations */ + .long 0 /* PointerToLineNumbers */ + .short 0 /* NumberOfRelocations */ + .short 0 /* NumberOfLineNumbers */ + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_WRITE /* Characteristics */ + + .set .Lsection_count, (. 
- .Lsection_table) / 40 + + .balign 0x10000 /* PECOFF_SEGMENT_ALIGN */ +.Lefi_header_end: + .endm diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c new file mode 100644 index 000000000..a31329971 --- /dev/null +++ b/arch/loongarch/kernel/efi.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * EFI initialization + * + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/acpi.h> +#include <linux/efi.h> +#include <linux/efi-bgrt.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/io.h> +#include <linux/kobject.h> +#include <linux/memblock.h> +#include <linux/reboot.h> +#include <linux/uaccess.h> + +#include <asm/early_ioremap.h> +#include <asm/efi.h> +#include <asm/loongson.h> + +static unsigned long efi_nr_tables; +static unsigned long efi_config_table; + +static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; + +static efi_system_table_t *efi_systab; +static efi_config_table_type_t arch_tables[] __initdata = { + {LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" }, + {}, +}; + +void __init efi_runtime_init(void) +{ + if (!efi_enabled(EFI_BOOT)) + return; + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return; + } + + efi.runtime = (efi_runtime_services_t *)efi_systab->runtime; + efi.runtime_version = (unsigned int)efi.runtime->hdr.revision; + + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); +} + +void __init efi_init(void) +{ + int size; + void *config_tables; + struct efi_boot_memmap *tbl; + + if (!efi_system_table) + return; + + efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab)); + if (!efi_systab) { + pr_err("Can't find EFI system table.\n"); + return; + } + + efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor); + + set_bit(EFI_64BIT, &efi.flags); + efi_nr_tables = efi_systab->nr_tables; + efi_config_table = (unsigned long)efi_systab->tables; + + size = sizeof(efi_config_table_t); + config_tables = early_memremap(efi_config_table, efi_nr_tables * size); + efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables); + early_memunmap(config_tables, efi_nr_tables * size); + + set_bit(EFI_CONFIG_TABLES, &efi.flags); + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) + memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); + + if (boot_memmap == EFI_INVALID_TABLE_ADDR) + return; + + tbl = early_memremap_ro(boot_memmap, sizeof(*tbl)); + if (tbl) { + struct efi_memory_map_data data; + + data.phys_map = boot_memmap + sizeof(*tbl); + data.size = tbl->map_size; + data.desc_size = tbl->desc_size; + data.desc_version = tbl->desc_ver; + + if (efi_memmap_init_early(&data) < 0) + panic("Unable to map EFI memory map.\n"); + + early_memunmap(tbl, sizeof(*tbl)); + } +} diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c new file mode 100644 index 000000000..0fa81ced2 --- /dev/null +++ b/arch/loongarch/kernel/elf.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/export.h> +#include <linux/sched.h> + +#include <asm/cpu-features.h> +#include <asm/cpu-info.h> + +int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file 
*elf, + bool is_interp, struct arch_elf_state *state) +{ + return 0; +} + +int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, + struct arch_elf_state *state) +{ + return 0; +} diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S new file mode 100644 index 000000000..d53b631c9 --- /dev/null +++ b/arch/loongarch/kernel/entry.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/loongarch.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + + .text + .cfi_sections .debug_frame + .align 5 +SYM_FUNC_START(handle_syscall) + csrrd t0, PERCPU_BASE_KS + la.abs t1, kernelsp + add.d t1, t1, t0 + move t2, sp + ld.d sp, t1, 0 + + addi.d sp, sp, -PT_SIZE + cfi_st t2, PT_R3 + cfi_rel_offset sp, PT_R3 + st.d zero, sp, PT_R0 + csrrd t2, LOONGARCH_CSR_PRMD + st.d t2, sp, PT_PRMD + csrrd t2, LOONGARCH_CSR_CRMD + st.d t2, sp, PT_CRMD + csrrd t2, LOONGARCH_CSR_EUEN + st.d t2, sp, PT_EUEN + csrrd t2, LOONGARCH_CSR_ECFG + st.d t2, sp, PT_ECFG + csrrd t2, LOONGARCH_CSR_ESTAT + st.d t2, sp, PT_ESTAT + cfi_st ra, PT_R1 + cfi_st a0, PT_R4 + cfi_st a1, PT_R5 + cfi_st a2, PT_R6 + cfi_st a3, PT_R7 + cfi_st a4, PT_R8 + cfi_st a5, PT_R9 + cfi_st a6, PT_R10 + cfi_st a7, PT_R11 + csrrd ra, LOONGARCH_CSR_ERA + st.d ra, sp, PT_ERA + cfi_rel_offset ra, PT_ERA + + cfi_st tp, PT_R2 + cfi_st u0, PT_R21 + cfi_st fp, PT_R22 + + SAVE_STATIC + + move u0, t0 + li.d tp, ~_THREAD_MASK + and tp, tp, sp + + move a0, sp + bl do_syscall + + RESTORE_ALL_AND_RET +SYM_FUNC_END(handle_syscall) + +SYM_CODE_START(ret_from_fork) + bl schedule_tail # a0 = struct task_struct *prev + move a0, sp + bl syscall_exit_to_user_mode + RESTORE_STATIC + RESTORE_SOME + RESTORE_SP_AND_RET +SYM_CODE_END(ret_from_fork) + +SYM_CODE_START(ret_from_kernel_thread) + bl schedule_tail # a0 = struct task_struct *prev + move a0, s1 + jirl ra, s0, 0 + move a0, sp + bl syscall_exit_to_user_mode + RESTORE_STATIC + RESTORE_SOME + RESTORE_SP_AND_RET +SYM_CODE_END(ret_from_kernel_thread) diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c new file mode 100644 index 000000000..6d56a463b --- /dev/null +++ b/arch/loongarch/kernel/env.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/acpi.h> +#include <linux/efi.h> +#include <linux/export.h> +#include <linux/memblock.h> +#include <asm/early_ioremap.h> +#include <asm/bootinfo.h> +#include <asm/loongson.h> + +u64 efi_system_table; +struct loongson_system_configuration loongson_sysconf; +EXPORT_SYMBOL(loongson_sysconf); + +void __init init_environ(void) +{ + int efi_boot = fw_arg0; + char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); + + if (efi_boot) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); + + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + early_memunmap(cmdline, COMMAND_LINE_SIZE); + + efi_system_table = fw_arg2; +} + +static int __init init_cpu_fullname(void) +{ + int cpu; + + if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) { + for (cpu = 0; cpu < NR_CPUS; cpu++) + 
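/* a firmware-provided "Loongson*" name overrides the probed per-core default */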
__cpu_full_name[cpu] = loongson_sysconf.cpuname; + } + return 0; +} +arch_initcall(init_cpu_fullname); + +static ssize_t boardinfo_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, + "BIOS Information\n" + "Vendor\t\t\t: %s\n" + "Version\t\t\t: %s\n" + "ROM Size\t\t: %d KB\n" + "Release Date\t\t: %s\n\n" + "Board Information\n" + "Manufacturer\t\t: %s\n" + "Board Name\t\t: %s\n" + "Family\t\t\t: LOONGSON64\n\n", + b_info.bios_vendor, b_info.bios_version, + b_info.bios_size, b_info.bios_release_date, + b_info.board_vendor, b_info.board_name); +} + +static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444, + boardinfo_show, NULL); + +static int __init boardinfo_init(void) +{ + struct kobject *loongson_kobj; + + loongson_kobj = kobject_create_and_add("loongson", firmware_kobj); + + return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr); +} +late_initcall(boardinfo_init); diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S new file mode 100644 index 000000000..576b3370a --- /dev/null +++ b/arch/loongarch/kernel/fpu.S @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Lu Zeng <zenglu@loongson.cn> + * Pei Huang <huangpei@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/asm-offsets.h> +#include <asm/errno.h> +#include <asm/export.h> +#include <asm/fpregdef.h> +#include <asm/loongarch.h> +#include <asm/regdef.h> + +#define FPU_REG_WIDTH 8 +#define LSX_REG_WIDTH 16 +#define LASX_REG_WIDTH 32 + + .macro EX insn, reg, src, offs +.ex\@: \insn \reg, \src, \offs + .section __ex_table,"a" + PTR .ex\@, fault + .previous + .endm + + .macro sc_save_fp base + EX fst.d $f0, \base, (0 * FPU_REG_WIDTH) + EX fst.d $f1, \base, (1 * FPU_REG_WIDTH) + EX fst.d $f2, \base, (2 * FPU_REG_WIDTH) + EX fst.d $f3, \base, (3 * FPU_REG_WIDTH) + EX fst.d $f4, \base, (4 * FPU_REG_WIDTH) + EX fst.d $f5, \base, (5 * FPU_REG_WIDTH) + EX fst.d $f6, \base, (6 * FPU_REG_WIDTH) + EX fst.d $f7, \base, (7 * FPU_REG_WIDTH) + EX fst.d $f8, \base, (8 * FPU_REG_WIDTH) + EX fst.d $f9, \base, (9 * FPU_REG_WIDTH) + EX fst.d $f10, \base, (10 * FPU_REG_WIDTH) + EX fst.d $f11, \base, (11 * FPU_REG_WIDTH) + EX fst.d $f12, \base, (12 * FPU_REG_WIDTH) + EX fst.d $f13, \base, (13 * FPU_REG_WIDTH) + EX fst.d $f14, \base, (14 * FPU_REG_WIDTH) + EX fst.d $f15, \base, (15 * FPU_REG_WIDTH) + EX fst.d $f16, \base, (16 * FPU_REG_WIDTH) + EX fst.d $f17, \base, (17 * FPU_REG_WIDTH) + EX fst.d $f18, \base, (18 * FPU_REG_WIDTH) + EX fst.d $f19, \base, (19 * FPU_REG_WIDTH) + EX fst.d $f20, \base, (20 * FPU_REG_WIDTH) + EX fst.d $f21, \base, (21 * FPU_REG_WIDTH) + EX fst.d $f22, \base, (22 * FPU_REG_WIDTH) + EX fst.d $f23, \base, (23 * FPU_REG_WIDTH) + EX fst.d $f24, \base, (24 * FPU_REG_WIDTH) + EX fst.d $f25, \base, (25 * FPU_REG_WIDTH) + EX fst.d $f26, \base, (26 * FPU_REG_WIDTH) + EX fst.d $f27, \base, (27 * FPU_REG_WIDTH) + EX fst.d $f28, \base, (28 * FPU_REG_WIDTH) + EX fst.d $f29, \base, (29 * FPU_REG_WIDTH) + EX fst.d $f30, \base, (30 * FPU_REG_WIDTH) + EX fst.d $f31, \base, (31 * FPU_REG_WIDTH) + .endm + + .macro sc_restore_fp base + EX fld.d $f0, \base, (0 * FPU_REG_WIDTH) + EX fld.d $f1, \base, (1 * FPU_REG_WIDTH) + EX fld.d $f2, \base, (2 * FPU_REG_WIDTH) + EX fld.d $f3, \base, (3 * FPU_REG_WIDTH) + EX fld.d $f4, \base, (4 * FPU_REG_WIDTH) + EX fld.d $f5, \base, (5 * FPU_REG_WIDTH) + EX fld.d $f6, \base, 
(6 * FPU_REG_WIDTH) + EX fld.d $f7, \base, (7 * FPU_REG_WIDTH) + EX fld.d $f8, \base, (8 * FPU_REG_WIDTH) + EX fld.d $f9, \base, (9 * FPU_REG_WIDTH) + EX fld.d $f10, \base, (10 * FPU_REG_WIDTH) + EX fld.d $f11, \base, (11 * FPU_REG_WIDTH) + EX fld.d $f12, \base, (12 * FPU_REG_WIDTH) + EX fld.d $f13, \base, (13 * FPU_REG_WIDTH) + EX fld.d $f14, \base, (14 * FPU_REG_WIDTH) + EX fld.d $f15, \base, (15 * FPU_REG_WIDTH) + EX fld.d $f16, \base, (16 * FPU_REG_WIDTH) + EX fld.d $f17, \base, (17 * FPU_REG_WIDTH) + EX fld.d $f18, \base, (18 * FPU_REG_WIDTH) + EX fld.d $f19, \base, (19 * FPU_REG_WIDTH) + EX fld.d $f20, \base, (20 * FPU_REG_WIDTH) + EX fld.d $f21, \base, (21 * FPU_REG_WIDTH) + EX fld.d $f22, \base, (22 * FPU_REG_WIDTH) + EX fld.d $f23, \base, (23 * FPU_REG_WIDTH) + EX fld.d $f24, \base, (24 * FPU_REG_WIDTH) + EX fld.d $f25, \base, (25 * FPU_REG_WIDTH) + EX fld.d $f26, \base, (26 * FPU_REG_WIDTH) + EX fld.d $f27, \base, (27 * FPU_REG_WIDTH) + EX fld.d $f28, \base, (28 * FPU_REG_WIDTH) + EX fld.d $f29, \base, (29 * FPU_REG_WIDTH) + EX fld.d $f30, \base, (30 * FPU_REG_WIDTH) + EX fld.d $f31, \base, (31 * FPU_REG_WIDTH) + .endm + + .macro sc_save_fcc base, tmp0, tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.d \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.d \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.d \tmp1, \tmp0, 31, 24 + movcf2gr \tmp0, $fcc4 + bstrins.d \tmp1, \tmp0, 39, 32 + movcf2gr \tmp0, $fcc5 + bstrins.d \tmp1, \tmp0, 47, 40 + movcf2gr \tmp0, $fcc6 + bstrins.d \tmp1, \tmp0, 55, 48 + movcf2gr \tmp0, $fcc7 + bstrins.d \tmp1, \tmp0, 63, 56 + EX st.d \tmp1, \base, 0 + .endm + + .macro sc_restore_fcc base, tmp0, tmp1 + EX ld.d \tmp0, \base, 0 + bstrpick.d \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.d \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.d \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.d \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + bstrpick.d \tmp1, \tmp0, 39, 32 + movgr2cf $fcc4, \tmp1 + bstrpick.d \tmp1, \tmp0, 47, 40 + movgr2cf $fcc5, \tmp1 + bstrpick.d \tmp1, \tmp0, 55, 48 + movgr2cf $fcc6, \tmp1 + bstrpick.d \tmp1, \tmp0, 63, 56 + movgr2cf $fcc7, \tmp1 + .endm + + .macro sc_save_fcsr base, tmp0 + movfcsr2gr \tmp0, fcsr0 + EX st.w \tmp0, \base, 0 + .endm + + .macro sc_restore_fcsr base, tmp0 + EX ld.w \tmp0, \base, 0 + movgr2fcsr fcsr0, \tmp0 + .endm + +/* + * Save a thread's fp context. + */ +SYM_FUNC_START(_save_fp) + fpu_save_csr a0 t1 + fpu_save_double a0 t1 # clobbers t1 + fpu_save_cc a0 t1 t2 # clobbers t1, t2 + jr ra +SYM_FUNC_END(_save_fp) +EXPORT_SYMBOL(_save_fp) + +/* + * Restore a thread's fp context. + */ +SYM_FUNC_START(_restore_fp) + fpu_restore_double a0 t1 # clobbers t1 + fpu_restore_csr a0 t1 + fpu_restore_cc a0 t1 t2 # clobbers t1, t2 + jr ra +SYM_FUNC_END(_restore_fp) + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * The value to initialize fcsr0 to comes in $a0. 
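+ * The registers themselves are filled with all ones (-1 sign-extended
+ * to 64 bits) by the movgr2fr.d sequence below.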
+ */ + +SYM_FUNC_START(_init_fpu) + li.w t1, CSR_EUEN_FPEN + csrxchg t1, t1, LOONGARCH_CSR_EUEN + + movgr2fcsr fcsr0, a0 + + li.w t1, -1 # SNaN + + movgr2fr.d $f0, t1 + movgr2fr.d $f1, t1 + movgr2fr.d $f2, t1 + movgr2fr.d $f3, t1 + movgr2fr.d $f4, t1 + movgr2fr.d $f5, t1 + movgr2fr.d $f6, t1 + movgr2fr.d $f7, t1 + movgr2fr.d $f8, t1 + movgr2fr.d $f9, t1 + movgr2fr.d $f10, t1 + movgr2fr.d $f11, t1 + movgr2fr.d $f12, t1 + movgr2fr.d $f13, t1 + movgr2fr.d $f14, t1 + movgr2fr.d $f15, t1 + movgr2fr.d $f16, t1 + movgr2fr.d $f17, t1 + movgr2fr.d $f18, t1 + movgr2fr.d $f19, t1 + movgr2fr.d $f20, t1 + movgr2fr.d $f21, t1 + movgr2fr.d $f22, t1 + movgr2fr.d $f23, t1 + movgr2fr.d $f24, t1 + movgr2fr.d $f25, t1 + movgr2fr.d $f26, t1 + movgr2fr.d $f27, t1 + movgr2fr.d $f28, t1 + movgr2fr.d $f29, t1 + movgr2fr.d $f30, t1 + movgr2fr.d $f31, t1 + + jr ra +SYM_FUNC_END(_init_fpu) + +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_save_fp_context) + sc_save_fcc a1 t1 t2 + sc_save_fcsr a2 t1 + sc_save_fp a0 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_save_fp_context) + +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_restore_fp_context) + sc_restore_fp a0 + sc_restore_fcc a1 t1 t2 + sc_restore_fcsr a2 t1 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_restore_fp_context) + +SYM_FUNC_START(fault) + li.w a0, -EFAULT # failure + jr ra +SYM_FUNC_END(fault) diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S new file mode 100644 index 000000000..75e5be807 --- /dev/null +++ b/arch/loongarch/kernel/genex.S @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2002, 2007 Maciej W. Rozycki + * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. 
+ */ +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/loongarch.h> +#include <asm/regdef.h> +#include <asm/fpregdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + + .align 5 +SYM_FUNC_START(__arch_cpu_idle) + /* start of rollback region */ + LONG_L t0, tp, TI_FLAGS + nop + andi t0, t0, _TIF_NEED_RESCHED + bnez t0, 1f + nop + nop + nop + idle 0 + /* end of rollback region */ +1: jr ra +SYM_FUNC_END(__arch_cpu_idle) + +SYM_FUNC_START(handle_vint) + BACKUP_T0T1 + SAVE_ALL + la.abs t1, __arch_cpu_idle + LONG_L t0, sp, PT_ERA + /* 32 byte rollback region */ + ori t0, t0, 0x1f + xori t0, t0, 0x1f + bne t0, t1, 1f + LONG_S t0, sp, PT_ERA +1: move a0, sp + move a1, sp + la.abs t0, do_vint + jirl ra, t0, 0 + RESTORE_ALL_AND_RET +SYM_FUNC_END(handle_vint) + +SYM_FUNC_START(except_vec_cex) + b cache_parity_error +SYM_FUNC_END(except_vec_cex) + + .macro build_prep_badv + csrrd t0, LOONGARCH_CSR_BADV + PTR_S t0, sp, PT_BVADDR + .endm + + .macro build_prep_fcsr + movfcsr2gr a1, fcsr0 + .endm + + .macro build_prep_none + .endm + + .macro BUILD_HANDLER exception handler prep + .align 5 + SYM_FUNC_START(handle_\exception) + BACKUP_T0T1 + SAVE_ALL + build_prep_\prep + move a0, sp + la.abs t0, do_\handler + jirl ra, t0, 0 + RESTORE_ALL_AND_RET + SYM_FUNC_END(handle_\exception) + .endm + + BUILD_HANDLER ade ade badv + BUILD_HANDLER ale ale badv + BUILD_HANDLER bp bp none + BUILD_HANDLER fpe fpe fcsr + BUILD_HANDLER fpu fpu none + BUILD_HANDLER lsx lsx none + BUILD_HANDLER lasx lasx none + BUILD_HANDLER lbt lbt none + BUILD_HANDLER ri ri none + BUILD_HANDLER watch watch none + BUILD_HANDLER reserved reserved none /* others */ + +SYM_FUNC_START(handle_sys) + la.abs t0, handle_syscall + jr t0 +SYM_FUNC_END(handle_sys) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S new file mode 100644 index 000000000..84970e266 --- /dev/null +++ b/arch/loongarch/kernel/head.S @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/threads.h> + +#include <asm/addrspace.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/bug.h> +#include <asm/regdef.h> +#include <asm/loongarch.h> +#include <asm/stackframe.h> + +#ifdef CONFIG_EFI_STUB + +#include "efi-header.S" + + __HEAD + +_head: + .word MZ_MAGIC /* "MZ", MS-DOS header */ + .org 0x8 + .dword kernel_entry /* Kernel entry point */ + .dword _end - _text /* Kernel image effective size */ + .quad 0 /* Kernel image load offset from start of RAM */ + .org 0x3c /* 0x20 ~ 0x3b reserved */ + .long pe_header - _head /* Offset to the PE header */ + +pe_header: + __EFI_PE_HEADER + +SYM_DATA(kernel_asize, .long _end - _text); +SYM_DATA(kernel_fsize, .long _edata - _text); +SYM_DATA(kernel_offset, .long kernel_offset - _text); + +#endif + + __REF + + .align 12 + +SYM_CODE_START(kernel_entry) # kernel entry point + + /* Config direct window and set PG */ + li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx + csrwr t0, LOONGARCH_CSR_DMWIN0 + li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx + csrwr t0, LOONGARCH_CSR_DMWIN1 + + /* We might not get launched at the address the kernel is linked to, + so we jump there. 
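+ la.abs below yields the link-time absolute address of label 0f, so
+ execution continues at the linked location.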
*/ + la.abs t0, 0f + jr t0 +0: + /* Enable PG */ + li.w t0, 0xb0 # PLV=0, IE=0, PG=1 + csrwr t0, LOONGARCH_CSR_CRMD + li.w t0, 0x04 # PLV=0, PIE=1, PWE=0 + csrwr t0, LOONGARCH_CSR_PRMD + li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 + csrwr t0, LOONGARCH_CSR_EUEN + + la.pcrel t0, __bss_start # clear .bss + st.d zero, t0, 0 + la.pcrel t1, __bss_stop - LONGSIZE +1: + addi.d t0, t0, LONGSIZE + st.d zero, t0, 0 + bne t0, t1, 1b + + la.pcrel t0, fw_arg0 + st.d a0, t0, 0 # firmware arguments + la.pcrel t0, fw_arg1 + st.d a1, t0, 0 + la.pcrel t0, fw_arg2 + st.d a2, t0, 0 + + /* KSave3 used for percpu base, initialized as 0 */ + csrwr zero, PERCPU_BASE_KS + /* GPR21 used for percpu base (runtime), initialized as 0 */ + move u0, zero + + la.pcrel tp, init_thread_union + /* Set the SP after an empty pt_regs. */ + PTR_LI sp, (_THREAD_SIZE - PT_SIZE) + PTR_ADD sp, sp, tp + set_saved_sp sp, t0, t1 + + bl start_kernel + ASM_BUG() + +SYM_CODE_END(kernel_entry) + +#ifdef CONFIG_SMP + +/* + * SMP slave cpus entry point. Board specific code for bootstrap calls this + * function after setting up the stack and tp registers. + */ +SYM_CODE_START(smpboot_entry) + li.d t0, CSR_DMW0_INIT # UC, PLV0 + csrwr t0, LOONGARCH_CSR_DMWIN0 + li.d t0, CSR_DMW1_INIT # CA, PLV0 + csrwr t0, LOONGARCH_CSR_DMWIN1 + + la.abs t0, 0f + jr t0 +0: + /* Enable PG */ + li.w t0, 0xb0 # PLV=0, IE=0, PG=1 + csrwr t0, LOONGARCH_CSR_CRMD + li.w t0, 0x04 # PLV=0, PIE=1, PWE=0 + csrwr t0, LOONGARCH_CSR_PRMD + li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 + csrwr t0, LOONGARCH_CSR_EUEN + + la.abs t0, cpuboot_data + ld.d sp, t0, CPU_BOOT_STACK + ld.d tp, t0, CPU_BOOT_TINFO + + bl start_secondary + ASM_BUG() + +SYM_CODE_END(smpboot_entry) + +#endif /* CONFIG_SMP */ + +SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE) diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c new file mode 100644 index 000000000..1a65d0527 --- /dev/null +++ b/arch/loongarch/kernel/idle.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LoongArch idle loop support. 
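+ *
+ * The idle instruction must run with interrupts enabled; taking an
+ * interrupt is what terminates it (see arch_cpu_idle() below).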
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/cpu.h> +#include <linux/irqflags.h> +#include <asm/cpu.h> +#include <asm/idle.h> + +void __cpuidle arch_cpu_idle(void) +{ + raw_local_irq_enable(); + __arch_cpu_idle(); /* idle instruction needs irq enabled */ +} diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h new file mode 100644 index 000000000..88f5d8170 --- /dev/null +++ b/arch/loongarch/kernel/image-vars.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __LOONGARCH_KERNEL_IMAGE_VARS_H +#define __LOONGARCH_KERNEL_IMAGE_VARS_H + +#ifdef CONFIG_EFI_STUB + +__efistub_memcmp = memcmp; +__efistub_memchr = memchr; +__efistub_strcat = strcat; +__efistub_strcmp = strcmp; +__efistub_strlen = strlen; +__efistub_strncat = strncat; +__efistub_strnstr = strnstr; +__efistub_strnlen = strnlen; +__efistub_strrchr = strrchr; +__efistub_kernel_entry = kernel_entry; +__efistub_kernel_asize = kernel_asize; +__efistub_kernel_fsize = kernel_fsize; +__efistub_kernel_offset = kernel_offset; +__efistub_screen_info = screen_info; + +#endif + +#endif /* __LOONGARCH_KERNEL_IMAGE_VARS_H */ diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c new file mode 100644 index 000000000..b1df0ec34 --- /dev/null +++ b/arch/loongarch/kernel/inst.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <asm/inst.h> + +u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm) +{ + union loongarch_instruction insn; + + insn.reg1i20_format.opcode = lu32id_op; + insn.reg1i20_format.rd = rd; + insn.reg1i20_format.immediate = imm; + + return insn.word; +} + +u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) +{ + union loongarch_instruction insn; + + insn.reg2i12_format.opcode = lu52id_op; + insn.reg2i12_format.rd = rd; + insn.reg2i12_format.rj = rj; + insn.reg2i12_format.immediate = imm; + + return insn.word; +} + +u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest) +{ + union loongarch_instruction insn; + + insn.reg2i16_format.opcode = jirl_op; + insn.reg2i16_format.rd = rd; + insn.reg2i16_format.rj = rj; + insn.reg2i16_format.immediate = (dest - pc) >> 2; + + return insn.word; +} diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c new file mode 100644 index 000000000..cb85bda5a --- /dev/null +++ b/arch/loongarch/kernel/io.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/export.h> +#include <linux/types.h> +#include <linux/io.h> + +/* + * Copy data from IO memory space to "real" memory space. + */ +void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)from, 8)) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } + + while (count >= 8) { + *(u64 *)to = __raw_readq(from); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. 
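+ * Mirrors __memcpy_fromio(): byte writes until the destination is
+ * 8-byte aligned, then 64-bit writes, then a byte tail.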
+ */ +void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } + + while (count >= 8) { + __raw_writeq(*(u64 *)from, to); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_toio); + +/* + * "memset" on IO memory space. + */ +void __memset_io(volatile void __iomem *dst, int c, size_t count) +{ + u64 qc = (u8)c; + + qc |= qc << 8; + qc |= qc << 16; + qc |= qc << 32; + + while (count && !IS_ALIGNED((unsigned long)dst, 8)) { + __raw_writeb(c, dst); + dst++; + count--; + } + + while (count >= 8) { + __raw_writeq(qc, dst); + dst += 8; + count -= 8; + } + + while (count) { + __raw_writeb(c, dst); + dst++; + count--; + } +} +EXPORT_SYMBOL(__memset_io); diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c new file mode 100644 index 000000000..0524bf116 --- /dev/null +++ b/arch/loongarch/kernel/irq.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/kernel_stat.h> +#include <linux/proc_fs.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> + +#include <asm/irq.h> +#include <asm/loongson.h> +#include <asm/setup.h> + +DEFINE_PER_CPU(unsigned long, irq_stack); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + +struct acpi_vector_group pch_group[MAX_IO_PICS]; +struct acpi_vector_group msi_group[MAX_IO_PICS]; +/* + * 'what should we do if we get a hw irq event on an illegal vector'. + * each architecture has to answer this themselves. 
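+ * On LoongArch we simply log the unexpected vector number.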
+ */ +void ack_bad_irq(unsigned int irq) +{ + pr_warn("Unexpected IRQ # %d\n", irq); +} + +atomic_t irq_err_count; + +asmlinkage void spurious_interrupt(void) +{ + atomic_inc(&irq_err_count); +} + +int arch_show_interrupts(struct seq_file *p, int prec) +{ +#ifdef CONFIG_SMP + show_ipi_list(p, prec); +#endif + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + return 0; +} + +static int __init early_pci_mcfg_parse(struct acpi_table_header *header) +{ + struct acpi_table_mcfg *mcfg; + struct acpi_mcfg_allocation *mptr; + int i, n; + + if (header->length < sizeof(struct acpi_table_mcfg)) + return -EINVAL; + + n = (header->length - sizeof(struct acpi_table_mcfg)) / + sizeof(struct acpi_mcfg_allocation); + mcfg = (struct acpi_table_mcfg *)header; + mptr = (struct acpi_mcfg_allocation *) &mcfg[1]; + + for (i = 0; i < n; i++, mptr++) { + msi_group[i].pci_segment = mptr->pci_segment; + pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf; + } + + return 0; +} + +static void __init init_vec_parent_group(void) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + msi_group[i].pci_segment = -1; + msi_group[i].node = -1; + pch_group[i].node = -1; + } + + acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); +} + +static int __init get_ipi_irq(void) +{ + struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + + if (d) + return irq_create_mapping(d, EXCCODE_IPI - EXCCODE_INT_START); + + return -EINVAL; +} + +void __init init_IRQ(void) +{ + int i; +#ifdef CONFIG_SMP + int r, ipi_irq; + static int ipi_dummy_dev; +#endif + unsigned int order = get_order(IRQ_STACK_SIZE); + struct page *page; + + clear_csr_ecfg(ECFG0_IM); + clear_csr_estat(ESTATF_IP); + + init_vec_parent_group(); + irqchip_init(); +#ifdef CONFIG_SMP + ipi_irq = get_ipi_irq(); + if (ipi_irq < 0) + panic("IPI IRQ mapping failed\n"); + irq_set_percpu_devid(ipi_irq); + r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev); + if (r < 0) + panic("IPI IRQ request failed\n"); +#endif + + for (i = 0; i < NR_IRQS; i++) + irq_set_noprobe(i); + + for_each_possible_cpu(i) { + page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order); + + per_cpu(irq_stack, i) = (unsigned long)page_address(page); + pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i, + per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE); + } + + set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); +} diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c new file mode 100644 index 000000000..2dcb9e003 --- /dev/null +++ b/arch/loongarch/kernel/machine_kexec.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * machine_kexec.c for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/compiler.h> +#include <linux/cpu.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/libfdt.h> +#include <linux/mm.h> +#include <linux/of_fdt.h> +#include <linux/reboot.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> + +#include <asm/bootinfo.h> +#include <asm/cacheflush.h> +#include <asm/page.h> + +/* 0x100000 ~ 0x200000 is safe */ +#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL) +#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL) + +static unsigned long reboot_code_buffer; +static cpumask_t cpus_in_crash = CPU_MASK_NONE; + +#ifdef CONFIG_SMP +static void (*relocated_kexec_smp_wait)(void *); +atomic_t 
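/* secondary CPUs spin on this flag before jumping into the relocated kexec code */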
kexec_ready_to_reboot = ATOMIC_INIT(0); +#endif + +static unsigned long efi_boot; +static unsigned long cmdline_ptr; +static unsigned long systable_ptr; +static unsigned long start_addr; +static unsigned long first_ind_entry; + +static void kexec_image_info(const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("kexec kimage info:\n"); + pr_debug("\ttype: %d\n", kimage->type); + pr_debug("\tstart: %lx\n", kimage->start); + pr_debug("\thead: %lx\n", kimage->head); + pr_debug("\tnr_segments: %lu\n", kimage->nr_segments); + + for (i = 0; i < kimage->nr_segments; i++) { + pr_debug("\t segment[%lu]: %016lx - %016lx", i, + kimage->segment[i].mem, + kimage->segment[i].mem + kimage->segment[i].memsz); + pr_debug("\t\t0x%lx bytes, %lu pages\n", + (unsigned long)kimage->segment[i].memsz, + (unsigned long)kimage->segment[i].memsz / PAGE_SIZE); + } +} + +int machine_kexec_prepare(struct kimage *kimage) +{ + int i; + char *bootloader = "kexec"; + void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR; + + kexec_image_info(kimage); + + kimage->arch.efi_boot = fw_arg0; + kimage->arch.systable_ptr = fw_arg2; + + /* Find the command line */ + for (i = 0; i < kimage->nr_segments; i++) { + if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + break; + } + } + + if (!kimage->arch.cmdline_ptr) { + pr_err("Command line not included in the provided image\n"); + return -EINVAL; + } + + /* kexec/kdump need a safe page to save reboot_code_buffer */ + kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE); + + reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page); + memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + +#ifdef CONFIG_SMP + /* All secondary cpus now may jump to kexec_smp_wait cycle */ + relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel); +#endif + + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void kexec_reboot(void) +{ + do_kexec_t do_kexec = NULL; + + /* + * We know we were online, and there will be no incoming IPIs at + * this point. Mark online again before rebooting so that the crash + * analysis tool will see us correctly. + */ + set_cpu_online(smp_processor_id(), true); + + /* Ensure remote CPUs observe that we're online before rebooting. */ + smp_mb__after_atomic(); + + /* + * Make sure we get correct instructions written by the + * machine_kexec_prepare() CPU. + */ + __asm__ __volatile__ ("\tibar 0\n"::); + +#ifdef CONFIG_SMP + /* All secondary cpus go to kexec_smp_wait */ + if (smp_processor_id() > 0) { + relocated_kexec_smp_wait(NULL); + unreachable(); + } +#endif + + do_kexec = (void *)reboot_code_buffer; + do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry); + + unreachable(); +} + + +#ifdef CONFIG_SMP +static void kexec_shutdown_secondary(void *regs) +{ + int cpu = smp_processor_id(); + + if (!cpu_online(cpu)) + return; + + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + + local_irq_disable(); + while (!atomic_read(&kexec_ready_to_reboot)) + cpu_relax(); + + kexec_reboot(); +} + +static void crash_shutdown_secondary(void *passed_regs) +{ + int cpu = smp_processor_id(); + struct pt_regs *regs = passed_regs; + + /* + * If we are passed registers, use those. 
Otherwise get the + * regs from the last interrupt, which should be correct, as + * we are in an interrupt. But if the regs are not there, + * pull them from the top of the stack. They are probably + * wrong, but we need something to keep from crashing again. + */ + if (!regs) + regs = get_irq_regs(); + if (!regs) + regs = task_pt_regs(current); + + if (!cpu_online(cpu)) + return; + + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + + local_irq_disable(); + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) + crash_save_cpu(regs, cpu); + cpumask_set_cpu(cpu, &cpus_in_crash); + + while (!atomic_read(&kexec_ready_to_reboot)) + cpu_relax(); + + kexec_reboot(); +} + +void crash_smp_send_stop(void) +{ + unsigned int ncpus; + unsigned long timeout; + static int cpus_stopped; + + /* + * This function can be called twice in the panic path, but we + * must execute it only once. + */ + if (cpus_stopped) + return; + + cpus_stopped = 1; + + /* Excluding the panic CPU */ + ncpus = num_online_cpus() - 1; + + smp_call_function(crash_shutdown_secondary, NULL, 0); + smp_wmb(); + + /* + * The crash CPU sends an IPI and waits for the other CPUs to + * respond, delaying at least 10 seconds. + */ + timeout = MSEC_PER_SEC * 10; + pr_emerg("Sending IPI to other cpus...\n"); + while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) { + mdelay(1); + cpu_relax(); + } +} +#endif /* defined(CONFIG_SMP) */ + +void machine_shutdown(void) +{ + int cpu; + + /* All CPUs go to reboot_code_buffer */ + for_each_possible_cpu(cpu) + if (!cpu_online(cpu)) + cpu_device_up(get_cpu_device(cpu)); + +#ifdef CONFIG_SMP + smp_call_function(kexec_shutdown_secondary, NULL, 0); +#endif +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int crashing_cpu; + + local_irq_disable(); + + crashing_cpu = smp_processor_id(); + crash_save_cpu(regs, crashing_cpu); + +#ifdef CONFIG_SMP + crash_smp_send_stop(); +#endif + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); + + pr_info("Starting crashdump kernel...\n"); +} + +void machine_kexec(struct kimage *image) +{ + unsigned long entry, *ptr; + struct kimage_arch *internal = &image->arch; + + efi_boot = internal->efi_boot; + cmdline_ptr = internal->cmdline_ptr; + systable_ptr = internal->systable_ptr; + + start_addr = (unsigned long)phys_to_virt(image->start); + + first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ? + (unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0; + + /* + * The generic kexec code builds a page list with physical + * addresses. They are directly accessible through XKPRANGE, + * hence the phys_to_virt() calls. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* Mark offline before disabling local irq. */ + set_cpu_online(smp_processor_id(), false); + + /* We do not want to be bothered. */ + local_irq_disable(); + + pr_notice("EFI boot flag 0x%lx\n", efi_boot); + pr_notice("Command line at 0x%lx\n", cmdline_ptr); + pr_notice("System table at 0x%lx\n", systable_ptr); + pr_notice("We will call new kernel at 0x%lx\n", start_addr); + pr_notice("Bye ...\n"); + + /* Make reboot code buffer available to the boot CPU. 
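+ * The relocate_new_kernel stub was copied into the control page by + * machine_kexec_prepare() through a cacheable mapping, so flush the + * caches here and let the reboot path below fetch the fresh copy.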
*/ + flush_cache_all(); + +#ifdef CONFIG_SMP + atomic_set(&kexec_ready_to_reboot, 1); +#endif + + kexec_reboot(); +} diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c new file mode 100644 index 000000000..aed901c57 --- /dev/null +++ b/arch/loongarch/kernel/mem.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/efi.h> +#include <linux/initrd.h> +#include <linux/memblock.h> + +#include <asm/bootinfo.h> +#include <asm/loongson.h> +#include <asm/sections.h> + +void __init memblock_init(void) +{ + u32 mem_type; + u64 mem_start, mem_end, mem_size; + efi_memory_desc_t *md; + + /* Parse memory information */ + for_each_efi_memory_desc(md) { + mem_type = md->type; + mem_start = md->phys_addr; + mem_size = md->num_pages << EFI_PAGE_SHIFT; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_PERSISTENT_MEMORY: + case EFI_CONVENTIONAL_MEMORY: + memblock_add(mem_start, mem_size); + if (max_low_pfn < (mem_end >> PAGE_SHIFT)) + max_low_pfn = mem_end >> PAGE_SHIFT; + break; + case EFI_PAL_CODE: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_RECLAIM_MEMORY: + memblock_add(mem_start, mem_size); + fallthrough; + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + memblock_reserve(mem_start, mem_size); + break; + } + } + + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + /* Reserve the first 2MB */ + memblock_reserve(PHYS_OFFSET, 0x200000); + + /* Reserve the kernel text/data/bss */ + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); + + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0); +} diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c new file mode 100644 index 000000000..d296a70b7 --- /dev/null +++ b/arch/loongarch/kernel/module-sections.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/elf.h> +#include <linux/kernel.h> +#include <linux/module.h> + +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val) +{ + struct mod_section *got_sec = &mod->arch.got; + int i = got_sec->num_entries; + struct got_entry *got = get_got_entry(val, got_sec); + + if (got) + return (Elf_Addr)got; + + /* There is no GOT entry for val yet, create a new one. */ + got = (struct got_entry *)got_sec->shdr->sh_addr; + got[i] = emit_got_entry(val); + + got_sec->num_entries++; + if (got_sec->num_entries > got_sec->max_entries) { + /* + * This may happen when the module contains a GOT_HI20 without + * a paired GOT_LO12. Such a module is broken, reject it. 
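+ * + * For illustration, a well-formed GOT access is emitted as a pair + * (a sketch; the register choice is arbitrary): + * + * pcalau12i $t0, %got_pc_hi20(sym) # R_LARCH_GOT_PC_HI20 + * ld.d $t0, $t0, %got_pc_lo12(sym) # R_LARCH_GOT_PC_LO12 + * + * and only the GOT_PC_HI20 half is counted by count_max_entries() + * when the GOT is sized.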
+ */ + pr_err("%s: module contains bad GOT relocation\n", mod->name); + return 0; + } + + return (Elf_Addr)&got[i]; +} + +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val) +{ + int nr; + struct mod_section *plt_sec = &mod->arch.plt; + struct mod_section *plt_idx_sec = &mod->arch.plt_idx; + struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec); + struct plt_idx_entry *plt_idx; + + if (plt) + return (Elf_Addr)plt; + + nr = plt_sec->num_entries; + + /* There is no duplicate entry, create a new one */ + plt = (struct plt_entry *)plt_sec->shdr->sh_addr; + plt[nr] = emit_plt_entry(val); + plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr; + plt_idx[nr] = emit_plt_idx_entry(val); + + plt_sec->num_entries++; + plt_idx_sec->num_entries++; + BUG_ON(plt_sec->num_entries > plt_sec->max_entries); + + return (Elf_Addr)&plt[nr]; +} + +static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) +{ + return x->r_info == y->r_info && x->r_addend == y->r_addend; +} + +static bool duplicate_rela(const Elf_Rela *rela, int idx) +{ + int i; + + for (i = 0; i < idx; i++) { + if (is_rela_equal(&rela[i], &rela[idx])) + return true; + } + + return false; +} + +static void count_max_entries(Elf_Rela *relas, int num, + unsigned int *plts, unsigned int *gots) +{ + unsigned int i, type; + + for (i = 0; i < num; i++) { + type = ELF_R_TYPE(relas[i].r_info); + switch (type) { + case R_LARCH_SOP_PUSH_PLT_PCREL: + case R_LARCH_B26: + if (!duplicate_rela(relas, i)) + (*plts)++; + break; + case R_LARCH_GOT_PC_HI20: + if (!duplicate_rela(relas, i)) + (*gots)++; + break; + default: + break; /* Do nothing. */ + } + } +} + +int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + unsigned int i, num_plts = 0, num_gots = 0; + + /* + * Find the empty .plt sections. 
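The .got, .plt and .plt.idx placeholders are + * emitted (empty) by the arch module linker script; locate all + * three by name so they can be sized below.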
+ */ + for (i = 0; i < ehdr->e_shnum; i++) { + if (!strcmp(secstrings + sechdrs[i].sh_name, ".got")) + mod->arch.got.shdr = sechdrs + i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) + mod->arch.plt.shdr = sechdrs + i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx")) + mod->arch.plt_idx.shdr = sechdrs + i; + } + + if (!mod->arch.got.shdr) { + pr_err("%s: module GOT section(s) missing\n", mod->name); + return -ENOEXEC; + } + if (!mod->arch.plt.shdr) { + pr_err("%s: module PLT section(s) missing\n", mod->name); + return -ENOEXEC; + } + if (!mod->arch.plt_idx.shdr) { + pr_err("%s: module PLT.IDX section(s) missing\n", mod->name); + return -ENOEXEC; + } + + /* Calculate the maximum number of entries */ + for (i = 0; i < ehdr->e_shnum; i++) { + int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela); + Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; + Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; + + if (sechdrs[i].sh_type != SHT_RELA) + continue; + + /* ignore relocations that operate on non-exec sections */ + if (!(dst_sec->sh_flags & SHF_EXECINSTR)) + continue; + + count_max_entries(relas, num_rela, &num_plts, &num_gots); + } + + mod->arch.got.shdr->sh_type = SHT_NOBITS; + mod->arch.got.shdr->sh_flags = SHF_ALLOC; + mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES; + mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry); + mod->arch.got.num_entries = 0; + mod->arch.got.max_entries = num_gots; + + mod->arch.plt.shdr->sh_type = SHT_NOBITS; + mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES; + mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry); + mod->arch.plt.num_entries = 0; + mod->arch.plt.max_entries = num_plts; + + mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS; + mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC; + mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES; + mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry); + mod->arch.plt_idx.num_entries = 0; + mod->arch.plt_idx.max_entries = num_plts; + + return 0; +} diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c new file mode 100644 index 000000000..4f1e6e55d --- /dev/null +++ b/arch/loongarch/kernel/module.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "kmod: " fmt + +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/mm.h> +#include <linux/numa.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> + +static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) +{ + if (*rela_stack_top >= RELA_STACK_DEPTH) + return -ENOEXEC; + + rela_stack[(*rela_stack_top)++] = stack_value; + pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value); + + return 0; +} + +static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top) +{ + if (*rela_stack_top == 0) + return -ENOEXEC; + + *stack_value = rela_stack[--(*rela_stack_top)]; + pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value); + + return 0; +} + +static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + return 0; +} + +static int apply_r_larch_error(struct module *me, 
u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + pr_err("%s: Unsupported relocation type %u, please add support for it.\n", me->name, type); + return -EINVAL; +} + +static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + *location = v; + return 0; +} + +static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + *(Elf_Addr *)location = v; + return 0; +} + +static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top); +} + +static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + return rela_stack_push(v, rela_stack, rela_stack_top); +} + +static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + int err = 0; + s64 opr1; + + err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); + if (err) + return err; + err = rela_stack_push(opr1, rela_stack, rela_stack_top); + if (err) + return err; + err = rela_stack_push(opr1, rela_stack, rela_stack_top); + if (err) + return err; + + return 0; +} + +static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + ptrdiff_t offset = (void *)v - (void *)location; + + if (offset >= SZ_128M) + v = module_emit_plt_entry(mod, v); + + if (offset < -SZ_128M) + v = module_emit_plt_entry(mod, v); + + return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type); +} + +static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + int err = 0; + s64 opr1, opr2, opr3; + + if (type == R_LARCH_SOP_IF_ELSE) { + err = rela_stack_pop(&opr3, rela_stack, rela_stack_top); + if (err) + return err; + } + + err = rela_stack_pop(&opr2, rela_stack, rela_stack_top); + if (err) + return err; + err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); + if (err) + return err; + + switch (type) { + case R_LARCH_SOP_AND: + err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top); + break; + case R_LARCH_SOP_ADD: + err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top); + break; + case R_LARCH_SOP_SUB: + err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top); + break; + case R_LARCH_SOP_SL: + err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top); + break; + case R_LARCH_SOP_SR: + err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top); + break; + case R_LARCH_SOP_IF_ELSE: + err = rela_stack_push(opr1 ? 
opr2 : opr3, rela_stack, rela_stack_top); + break; + default: + pr_err("%s: Unsupported relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return err; +} + +static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + int err = 0; + s64 opr1; + union loongarch_instruction *insn = (union loongarch_instruction *)location; + + err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); + if (err) + return err; + + switch (type) { + case R_LARCH_SOP_POP_32_U_10_12: + if (!unsigned_imm_check(opr1, 12)) + goto overflow; + + /* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */ + insn->reg2i12_format.immediate = opr1 & 0xfff; + return 0; + case R_LARCH_SOP_POP_32_S_10_12: + if (!signed_imm_check(opr1, 12)) + goto overflow; + + insn->reg2i12_format.immediate = opr1 & 0xfff; + return 0; + case R_LARCH_SOP_POP_32_S_10_16: + if (!signed_imm_check(opr1, 16)) + goto overflow; + + insn->reg2i16_format.immediate = opr1 & 0xffff; + return 0; + case R_LARCH_SOP_POP_32_S_10_16_S2: + if (opr1 % 4) + goto unaligned; + + if (!signed_imm_check(opr1, 18)) + goto overflow; + + insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff; + return 0; + case R_LARCH_SOP_POP_32_S_5_20: + if (!signed_imm_check(opr1, 20)) + goto overflow; + + insn->reg1i20_format.immediate = (opr1) & 0xfffff; + return 0; + case R_LARCH_SOP_POP_32_S_0_5_10_16_S2: + if (opr1 % 4) + goto unaligned; + + if (!signed_imm_check(opr1, 23)) + goto overflow; + + opr1 >>= 2; + insn->reg1i21_format.immediate_l = opr1 & 0xffff; + insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f; + return 0; + case R_LARCH_SOP_POP_32_S_0_10_10_16_S2: + if (opr1 % 4) + goto unaligned; + + if (!signed_imm_check(opr1, 28)) + goto overflow; + + opr1 >>= 2; + insn->reg0i26_format.immediate_l = opr1 & 0xffff; + insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff; + return 0; + case R_LARCH_SOP_POP_32_U: + if (!unsigned_imm_check(opr1, 32)) + goto overflow; + + /* (*(uint32_t *) PC) = opr */ + *location = (u32)opr1; + return 0; + default: + pr_err("%s: Unsupported relocation type %u\n", mod->name, type); + return -EINVAL; + } + +overflow: + pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n", + mod->name, opr1, __func__, type); + return -ENOEXEC; + +unaligned: + pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n", + mod->name, opr1, __func__, type); + return -ENOEXEC; +} + +static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + switch (type) { + case R_LARCH_ADD32: + *(s32 *)location += v; + return 0; + case R_LARCH_ADD64: + *(s64 *)location += v; + return 0; + case R_LARCH_SUB32: + *(s32 *)location -= v; + return 0; + case R_LARCH_SUB64: + *(s64 *)location -= v; + return 0; + default: + pr_err("%s: Unsupported relocation type %u\n", mod->name, type); + return -EINVAL; + } +} + +static int apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + ptrdiff_t offset = (void *)v - (void *)location; + union loongarch_instruction *insn = (union loongarch_instruction *)location; + + if (offset >= SZ_128M) + v = module_emit_plt_entry(mod, v); + + if (offset < -SZ_128M) + v = module_emit_plt_entry(mod, v); + + offset = (void *)v - (void *)location; + + if (offset & 3) { + pr_err("module %s: jump offset = 0x%llx unaligned! 
dangerous R_LARCH_B26 (%u) relocation\n", + mod->name, (long long)offset, type); + return -ENOEXEC; + } + + if (!signed_imm_check(offset, 28)) { + pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n", + mod->name, (long long)offset, type); + return -ENOEXEC; + } + + offset >>= 2; + insn->reg0i26_format.immediate_l = offset & 0xffff; + insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff; + + return 0; +} + +static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + union loongarch_instruction *insn = (union loongarch_instruction *)location; + /* Use s32 for a sign-extension deliberately. */ + s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) - + (void *)((Elf_Addr)location & ~0xfff); + Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20; + ptrdiff_t offset_rem = (void *)v - (void *)anchor; + + switch (type) { + case R_LARCH_PCALA_LO12: + insn->reg2i12_format.immediate = v & 0xfff; + break; + case R_LARCH_PCALA_HI20: + v = offset_hi20 >> 12; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + case R_LARCH_PCALA64_LO20: + v = offset_rem >> 32; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + case R_LARCH_PCALA64_HI12: + v = offset_rem >> 52; + insn->reg2i12_format.immediate = v & 0xfff; + break; + default: + pr_err("%s: Unsupported relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return 0; +} + +static int apply_r_larch_got_pc(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + Elf_Addr got = module_emit_got_entry(mod, v); + + if (!got) + return -EINVAL; + + switch (type) { + case R_LARCH_GOT_PC_LO12: + type = R_LARCH_PCALA_LO12; + break; + case R_LARCH_GOT_PC_HI20: + type = R_LARCH_PCALA_HI20; + break; + default: + pr_err("%s: Unsupported relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type); +} + +/* + * reloc_rela_handler() - Apply a particular relocation to a module + * @mod: the module to apply the reloc to + * @location: the address at which the reloc is to be applied + * @v: the value of the reloc, with addend for RELA-style + * @rela_stack: the stack used to store relocation info, LOCAL to THIS module + * @rela_stack_top: where the stack operations (push/pop) apply + * + * Return: 0 upon success, else -ERRNO + */ +typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type); + +/* The handlers for known reloc types */ +static reloc_rela_handler reloc_rela_handlers[] = { + [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error, + + [R_LARCH_NONE] = apply_r_larch_none, + [R_LARCH_32] = apply_r_larch_32, + [R_LARCH_64] = apply_r_larch_64, + [R_LARCH_MARK_LA] = apply_r_larch_none, + [R_LARCH_MARK_PCREL] = apply_r_larch_none, + [R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel, + [R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute, + [R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup, + [R_LARCH_SOP_PUSH_PLT_PCREL] = apply_r_larch_sop_push_plt_pcrel, + [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, + [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, + [R_LARCH_ADD32 ... 
R_LARCH_SUB64] = apply_r_larch_add_sub, + [R_LARCH_B26] = apply_r_larch_b26, + [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, + [R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc, +}; + +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *mod) +{ + int i, err; + unsigned int type; + s64 rela_stack[RELA_STACK_DEPTH]; + size_t rela_stack_top = 0; + reloc_rela_handler handler; + void *location; + Elf_Addr v; + Elf_Sym *sym; + Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr; + + pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec, + sechdrs[relsec].sh_info); + + rela_stack_top = 0; + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; + /* This is the symbol it is referring to */ + sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info); + if (IS_ERR_VALUE(sym->st_value)) { + /* Ignore unresolved weak symbol */ + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) + continue; + pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name); + return -ENOENT; + } + + type = ELF_R_TYPE(rel[i].r_info); + + if (type < ARRAY_SIZE(reloc_rela_handlers)) + handler = reloc_rela_handlers[type]; + else + handler = NULL; + + if (!handler) { + pr_err("%s: Unknown relocation type %u\n", mod->name, type); + return -EINVAL; + } + + pr_debug("type %d st_value %llx r_addend %llx loc %llx\n", + (int)ELF_R_TYPE(rel[i].r_info), + sym->st_value, rel[i].r_addend, (u64)location); + + v = sym->st_value + rel[i].r_addend; + err = handler(mod, location, v, rela_stack, &rela_stack_top, type); + if (err) + return err; + } + + return 0; +} + +void *module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); +} diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c new file mode 100644 index 000000000..f7ffce170 --- /dev/null +++ b/arch/loongarch/kernel/numa.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Xiang Gao <gaoxiang@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/export.h> +#include <linux/nodemask.h> +#include <linux/swap.h> +#include <linux/memblock.h> +#include <linux/pfn.h> +#include <linux/acpi.h> +#include <linux/efi.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <asm/bootinfo.h> +#include <asm/loongson.h> +#include <asm/numa.h> +#include <asm/page.h> +#include <asm/pgalloc.h> +#include <asm/sections.h> +#include <asm/time.h> + +int numa_off; +struct pglist_data *node_data[MAX_NUMNODES]; +unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; + +EXPORT_SYMBOL(node_data); +EXPORT_SYMBOL(node_distances); + +static struct numa_meminfo numa_meminfo; +cpumask_t cpus_on_node[MAX_NUMNODES]; +cpumask_t phys_cpus_on_node[MAX_NUMNODES]; +EXPORT_SYMBOL(cpus_on_node); + +/* + * apicid, cpu, node mappings + */ +s16 __cpuid_to_node[CONFIG_NR_CPUS] = { + [0 ... 
CONFIG_NR_CPUS - 1] = NUMA_NO_NODE +}; +EXPORT_SYMBOL(__cpuid_to_node); + +nodemask_t numa_nodes_parsed __initdata; + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); + +static int __init pcpu_cpu_to_node(int cpu) +{ + return early_cpu_to_node(cpu); +} + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + if (early_cpu_to_node(from) == early_cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +} + +void __init pcpu_populate_pte(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud; + pmd_t *pmd; + + if (p4d_none(*p4d)) { + pud_t *new; + + new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + pgd_populate(&init_mm, pgd, new); +#ifndef __PAGETABLE_PUD_FOLDED + pud_init((unsigned long)new, (unsigned long)invalid_pmd_table); +#endif + } + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + pmd_t *new; + + new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + pud_populate(&init_mm, pud, new); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_init((unsigned long)new, (unsigned long)invalid_pte_table); +#endif + } + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd, new); + } +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int cpu; + int rc = -EINVAL; + + if (pcpu_chosen_fc == PCPU_FC_AUTO) { + if (nr_node_ids >= 8) + pcpu_chosen_fc = PCPU_FC_PAGE; + else + pcpu_chosen_fc = PCPU_FC_EMBED; + } + + /* + * Always reserve area for module percpu variables. That's + * what the legacy allocator did. + */ + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, PMD_SIZE, + pcpu_cpu_distance, pcpu_cpu_to_node); + if (rc < 0) + pr_warn("%s allocator failed (%d), falling back to page size\n", + pcpu_fc_names[pcpu_chosen_fc], rc); + } + if (rc < 0) + rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node); + if (rc < 0) + panic("cannot initialize percpu area (err=%d)", rc); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; +} +#endif + +/* + * Get the node id by logical cpu number. + * __cpuid_to_node maps a physical cpu id to its node, so we + * should use cpu_logical_map(cpu) to index it. + * + * This routine is only used in the early boot phase; once + * setup_per_cpu_areas() has been called and NUMA is initialized, + * cpu_to_node() is used instead. 
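+ * + * For example (numbers illustrative only): logical cpu 3 with + * cpu_logical_map(3) == 7 resolves to __cpuid_to_node[7].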
+ */ +int early_cpu_to_node(int cpu) +{ + int physid = cpu_logical_map(cpu); + + if (physid < 0) + return NUMA_NO_NODE; + + return __cpuid_to_node[physid]; +} + +void __init early_numa_add_cpu(int cpuid, s16 node) +{ + int cpu = __cpu_number_map[cpuid]; + + if (cpu < 0) + return; + + cpumask_set_cpu(cpu, &cpus_on_node[node]); + cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]); +} + +void numa_add_cpu(unsigned int cpu) +{ + int nid = cpu_to_node(cpu); + cpumask_set_cpu(cpu, &cpus_on_node[nid]); +} + +void numa_remove_cpu(unsigned int cpu) +{ + int nid = cpu_to_node(cpu); + cpumask_clear_cpu(cpu, &cpus_on_node[nid]); +} + +static int __init numa_add_memblk_to(int nid, u64 start, u64 end, + struct numa_meminfo *mi) +{ + /* ignore zero length blks */ + if (start == end) + return 0; + + /* whine about and ignore invalid blks */ + if (start > end || nid < 0 || nid >= MAX_NUMNODES) { + pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", + nid, start, end - 1); + return 0; + } + + if (mi->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: too many memblk ranges\n"); + return -EINVAL; + } + + mi->blk[mi->nr_blks].start = PFN_ALIGN(start); + mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1); + mi->blk[mi->nr_blks].nid = nid; + mi->nr_blks++; + return 0; +} + +/** + * numa_add_memblk - Add one numa_memblk to numa_meminfo + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * Add a new memblk to the default numa_meminfo. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + return numa_add_memblk_to(nid, start, end, &numa_meminfo); +} + +static void __init alloc_node_data(int nid) +{ + void *nd; + unsigned long nd_pa; + size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE); + + nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid); + if (!nd_pa) { + pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid); + return; + } + + nd = __va(nd_pa); + + node_data[nid] = nd; + memset(nd, 0, sizeof(pg_data_t)); +} + +static void __init node_mem_init(unsigned int node) +{ + unsigned long start_pfn, end_pfn; + unsigned long node_addrspace_offset; + + node_addrspace_offset = nid_to_addrbase(node); + pr_info("Node%d's addrspace_offset is 0x%lx\n", + node, node_addrspace_offset); + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", + node, start_pfn, end_pfn); + + alloc_node_data(node); +} + +#ifdef CONFIG_ACPI_NUMA + +/* + * Sanity check to catch more bad NUMA configurations (they are amazingly + * common). Make sure the nodes cover all memory. 
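+ * A slack of up to 1 MiB worth of pages (1 << (20 - PAGE_SHIFT) + * pfns) between the firmware map and the SRAT-described ranges is + * tolerated before the BUG_ON() below fires.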
+ */ +static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) +{ + int i; + u64 numaram, biosram; + + numaram = 0; + for (i = 0; i < mi->nr_blks; i++) { + u64 s = mi->blk[i].start >> PAGE_SHIFT; + u64 e = mi->blk[i].end >> PAGE_SHIFT; + + numaram += e - s; + numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); + if ((s64)numaram < 0) + numaram = 0; + } + max_pfn = max_low_pfn; + biosram = max_pfn - absent_pages_in_range(0, max_pfn); + + BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT))); + return true; +} + +static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type) +{ + static unsigned long num_physpages; + + num_physpages += (size >> PAGE_SHIFT); + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", + node, type, start, size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages); + memblock_set_node(start, size, &memblock.memory, node); +} + +/* + * add_numamem_region + * + * Add a usable memory region described by the BIOS. The + * routine computes each intersection between the BIOS + * region and the node regions, and adds it to the node's + * memblock pool. + * + */ +static void __init add_numamem_region(u64 start, u64 end, u32 type) +{ + u32 i; + u64 ofs = start; + + if (start >= end) { + pr_debug("Invalid region: %016llx-%016llx\n", start, end); + return; + } + + for (i = 0; i < numa_meminfo.nr_blks; i++) { + struct numa_memblk *mb = &numa_meminfo.blk[i]; + + if (ofs > mb->end) + continue; + + if (end > mb->end) { + add_node_intersection(mb->nid, ofs, mb->end - ofs, type); + ofs = mb->end; + } else { + add_node_intersection(mb->nid, ofs, end - ofs, type); + break; + } + } +} + +static void __init init_node_memblock(void) +{ + u32 mem_type; + u64 mem_end, mem_start, mem_size; + efi_memory_desc_t *md; + + /* Parse memory information and activate */ + for_each_efi_memory_desc(md) { + mem_type = md->type; + mem_start = md->phys_addr; + mem_size = md->num_pages << EFI_PAGE_SHIFT; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_PERSISTENT_MEMORY: + case EFI_CONVENTIONAL_MEMORY: + add_numamem_region(mem_start, mem_end, mem_type); + break; + case EFI_PAL_CODE: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_RECLAIM_MEMORY: + add_numamem_region(mem_start, mem_end, mem_type); + fallthrough; + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", + mem_type, mem_start, mem_size); + break; + } + } +} + +static void __init numa_default_distance(void) +{ + int row, col; + + for (row = 0; row < MAX_NUMNODES; row++) + for (col = 0; col < MAX_NUMNODES; col++) { + if (col == row) + node_distances[row][col] = LOCAL_DISTANCE; + else + /* We assume one node per package here! + * + * A SLIT should be used for multiple nodes + * per package to override the default setting. 
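+ * + * E.g. the default matrix for two nodes is + * + * node0 node1 + * node0 10 20 + * node1 20 10 + * + * with LOCAL_DISTANCE being 10 and REMOTE_DISTANCE 20.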
+ */ + node_distances[row][col] = REMOTE_DISTANCE; + } +} + +int __init init_numa_memory(void) +{ + int i; + int ret; + int node; + + for (i = 0; i < NR_CPUS; i++) + set_cpuid_to_node(i, NUMA_NO_NODE); + + numa_default_distance(); + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + + /* Parse SRAT and SLIT if provided by firmware. */ + ret = acpi_numa_init(); + if (ret < 0) + return ret; + + node_possible_map = numa_nodes_parsed; + if (WARN_ON(nodes_empty(node_possible_map))) + return -EINVAL; + + init_node_memblock(); + if (numa_meminfo_cover_memory(&numa_meminfo) == false) + return -EINVAL; + + for_each_node_mask(node, node_possible_map) { + node_mem_init(node); + node_set_online(node); + } + max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); + + setup_nr_node_ids(); + loongson_sysconf.nr_nodes = nr_node_ids; + loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]); + + return 0; +} + +#endif + +void __init paging_init(void) +{ + unsigned int node; + unsigned long zones_size[MAX_NR_ZONES] = {0, }; + + for_each_online_node(node) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + + if (end_pfn > max_low_pfn) + max_low_pfn = end_pfn; + } +#ifdef CONFIG_ZONE_DMA32 + zones_size[ZONE_DMA32] = MAX_DMA32_PFN; +#endif + zones_size[ZONE_NORMAL] = max_low_pfn; + free_area_init(zones_size); +} + +void __init mem_init(void) +{ + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + memblock_free_all(); + setup_zero_pages(); /* This comes from node 0 */ +} + +int pcibus_to_node(struct pci_bus *bus) +{ + return dev_to_node(&bus->dev); +} +EXPORT_SYMBOL(pcibus_to_node); diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c new file mode 100644 index 000000000..3a2edb157 --- /dev/null +++ b/arch/loongarch/kernel/perf_event.c @@ -0,0 +1,887 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux performance counter support for LoongArch. + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2010 MIPS Technologies, Inc. + * Copyright (C) 2011 Cavium Networks, Inc. + * Author: Deng-Cheng Zhu + */ + +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/uaccess.h> +#include <linux/sched/task_stack.h> + +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/stacktrace.h> +#include <asm/unwind.h> + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. 
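+ * + * The frame tail is read back as a struct stack_frame, roughly + * { unsigned long fp; unsigned long ra; }, located at + * fp - sizeof(struct stack_frame) on the user stack.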
+ */ +static unsigned long +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp) +{ + unsigned long err; + unsigned long __user *user_frame_tail; + struct stack_frame buftail; + + user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame)); + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + + pagefault_disable(); + err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail)); + pagefault_enable(); + + if (err || (unsigned long)user_frame_tail >= buftail.fp) + return 0; + + perf_callchain_store(entry, buftail.ra); + + return buftail.fp; +} + +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long fp; + + if (perf_guest_state()) { + /* We don't support guest os callchain now */ + return; + } + + perf_callchain_store(entry, regs->csr_era); + + fp = regs->regs[22]; + + while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf)) + fp = user_backtrace(entry, fp); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct unwind_state state; + unsigned long addr; + + for (unwind_start(&state, current, regs); + !unwind_done(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr || perf_callchain_store(entry, addr)) + return; + } +} + +#define LOONGARCH_MAX_HWEVENTS 32 + +struct cpu_hw_events { + /* Array of events on this cpu. */ + struct perf_event *events[LOONGARCH_MAX_HWEVENTS]; + + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)]; + + /* + * Software copy of the control register for each performance counter. + */ + unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS]; +}; +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { + .saved_ctrl = {0}, +}; + +/* The description of LoongArch performance events. */ +struct loongarch_perf_event { + unsigned int event_id; +}; + +static struct loongarch_perf_event raw_event; +static DEFINE_MUTEX(raw_event_mutex); + +#define C(x) PERF_COUNT_HW_CACHE_##x +#define HW_OP_UNSUPPORTED 0xffffffff +#define CACHE_OP_UNSUPPORTED 0xffffffff + +#define PERF_MAP_ALL_UNSUPPORTED \ + [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED} + +#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ +[0 ... C(MAX) - 1] = { \ + [0 ... C(OP_MAX) - 1] = { \ + [0 ... 
C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED}, \ + }, \ +} + +struct loongarch_pmu { + u64 max_period; + u64 valid_count; + u64 overflow; + const char *name; + unsigned int num_counters; + u64 (*read_counter)(unsigned int idx); + void (*write_counter)(unsigned int idx, u64 val); + const struct loongarch_perf_event *(*map_raw_event)(u64 config); + const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; + const struct loongarch_perf_event (*cache_event_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +static struct loongarch_pmu loongarch_pmu; + +#define M_PERFCTL_EVENT(event) (event & CSR_PERFCTRL_EVENT) + +#define M_PERFCTL_COUNT_EVENT_WHENEVER (CSR_PERFCTRL_PLV0 | \ + CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | \ + CSR_PERFCTRL_PLV3 | \ + CSR_PERFCTRL_IE) + +#define M_PERFCTL_CONFIG_MASK 0x1f0000 + +static void pause_local_counters(void); +static void resume_local_counters(void); + +static u64 loongarch_pmu_read_counter(unsigned int idx) +{ + u64 val = -1; + + switch (idx) { + case 0: + val = read_csr_perfcntr0(); + break; + case 1: + val = read_csr_perfcntr1(); + break; + case 2: + val = read_csr_perfcntr2(); + break; + case 3: + val = read_csr_perfcntr3(); + break; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } + + return val; +} + +static void loongarch_pmu_write_counter(unsigned int idx, u64 val) +{ + switch (idx) { + case 0: + write_csr_perfcntr0(val); + return; + case 1: + write_csr_perfcntr1(val); + return; + case 2: + write_csr_perfcntr2(val); + return; + case 3: + write_csr_perfcntr3(val); + return; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return; + } +} + +static unsigned int loongarch_pmu_read_control(unsigned int idx) +{ + unsigned int val = -1; + + switch (idx) { + case 0: + val = read_csr_perfctrl0(); + break; + case 1: + val = read_csr_perfctrl1(); + break; + case 2: + val = read_csr_perfctrl2(); + break; + case 3: + val = read_csr_perfctrl3(); + break; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } + + return val; +} + +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val) +{ + switch (idx) { + case 0: + write_csr_perfctrl0(val); + return; + case 1: + write_csr_perfctrl1(val); + return; + case 2: + write_csr_perfctrl2(val); + return; + case 3: + write_csr_perfctrl3(val); + return; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return; + } +} + +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) +{ + int i; + + for (i = 0; i < loongarch_pmu.num_counters; i++) { + if (!test_and_set_bit(i, cpuc->used_mask)) + return i; + } + + return -EAGAIN; +} + +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx) +{ + unsigned int cpu; + struct perf_event *event = container_of(evt, struct perf_event, hw); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); + + /* Make sure interrupt enabled. */ + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) | + (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE; + + cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id(); + + /* + * We do not actually let the counter run. Leave it until start(). 
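+ * The control value is only cached in cpuc->saved_ctrl[] here; + * resume_local_counters(), called via pmu_enable(), performs the + * actual CSR write.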
+ */ + pr_debug("Enabling perf counter for CPU%d\n", cpu); +} + +static void loongarch_pmu_disable_event(int idx) +{ + unsigned long flags; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); + + local_irq_save(flags); + cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) & + ~M_PERFCTL_COUNT_EVENT_WHENEVER; + loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]); + local_irq_restore(flags); +} + +static int loongarch_pmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + int ret = 0; + u64 left = local64_read(&hwc->period_left); + u64 period = hwc->sample_period; + + if (unlikely((left + period) & (1ULL << 63))) { + /* left underflowed by more than period. */ + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } else if (unlikely((left + period) <= period)) { + /* left underflowed by less than period. */ + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > loongarch_pmu.max_period) { + left = loongarch_pmu.max_period; + local64_set(&hwc->period_left, left); + } + + local64_set(&hwc->prev_count, loongarch_pmu.overflow - left); + + loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left); + + perf_event_update_userpage(event); + + return ret; +} + +static void loongarch_pmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + u64 delta; + u64 prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = loongarch_pmu.read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void loongarch_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + loongarch_pmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + loongarch_pmu_enable_event(hwc, hwc->idx); +} + +static void loongarch_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + loongarch_pmu_disable_event(hwc->idx); + barrier(); + loongarch_pmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int loongarch_pmu_add(struct perf_event *event, int flags) +{ + int idx, err = 0; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + perf_pmu_disable(event->pmu); + + /* To look for a free counter for this event. */ + idx = loongarch_pmu_alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; + loongarch_pmu_disable_event(idx); + cpuc->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + loongarch_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static void loongarch_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); + + loongarch_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void loongarch_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + loongarch_pmu_event_update(event, hwc, hwc->idx); +} + +static void loongarch_pmu_enable(struct pmu *pmu) +{ + resume_local_counters(); +} + +static void loongarch_pmu_disable(struct pmu *pmu) +{ + pause_local_counters(); +} + +static DEFINE_MUTEX(pmu_reserve_mutex); +static atomic_t active_events = ATOMIC_INIT(0); + +static int get_pmc_irq(void) +{ + struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + + if (d) + return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START); + + return -EINVAL; +} + +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { + on_each_cpu(reset_counters, NULL, 1); + free_irq(get_pmc_irq(), &loongarch_pmu); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static void handle_associated_event(struct cpu_hw_events *cpuc, int idx, + struct perf_sample_data *data, struct pt_regs *regs) +{ + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc = &event->hw; + + loongarch_pmu_event_update(event, hwc, idx); + data->period = event->hw.last_period; + if (!loongarch_pmu_event_set_period(event, hwc, idx)) + return; + + if (perf_event_overflow(event, data, regs)) + loongarch_pmu_disable_event(idx); +} + +static irqreturn_t pmu_handle_irq(int irq, void *dev) +{ + int n; + int handled = IRQ_NONE; + uint64_t counter; + struct pt_regs *regs; + struct perf_sample_data data; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * First we pause the local counters, so that when we are locked + * here, the counters are all paused. When it gets locked due to + * perf_disable(), the timer interrupt handler will be delayed. + * + * See also loongarch_pmu_start(). + */ + pause_local_counters(); + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0, 0); + + for (n = 0; n < loongarch_pmu.num_counters; n++) { + if (test_bit(n, cpuc->used_mask)) { + counter = loongarch_pmu.read_counter(n); + if (counter & loongarch_pmu.overflow) { + handle_associated_event(cpuc, n, &data, regs); + handled = IRQ_HANDLED; + } + } + } + + resume_local_counters(); + + /* + * Do all the work for the pending perf events. We can do this + * in here because the performance counter interrupt is a regular + * interrupt, not NMI. 
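+ * Architectures that take the PMU overflow as an NMI have to + * defer this work through a self-IPI instead.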
+ */ + if (handled == IRQ_HANDLED) + irq_work_run(); + + return handled; +} + +static int loongarch_pmu_event_init(struct perf_event *event) +{ + int r, irq; + unsigned long flags; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + /* Init it to avoid false validate_group */ + event->hw.event_base = 0xffffffff; + return -ENOENT; + } + + if (event->cpu >= 0 && !cpu_online(event->cpu)) + return -ENODEV; + + irq = get_pmc_irq(); + flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; + if (!atomic_inc_not_zero(&active_events)) { + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) { + r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu); + if (r < 0) { + mutex_unlock(&pmu_reserve_mutex); + pr_warn("PMU IRQ request failed\n"); + return -ENODEV; + } + } + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + return __hw_perf_event_init(event); +} + +static struct pmu pmu = { + .pmu_enable = loongarch_pmu_enable, + .pmu_disable = loongarch_pmu_disable, + .event_init = loongarch_pmu_event_init, + .add = loongarch_pmu_add, + .del = loongarch_pmu_del, + .start = loongarch_pmu_start, + .stop = loongarch_pmu_stop, + .read = loongarch_pmu_read, +}; + +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev) +{ + return M_PERFCTL_EVENT(pev->event_id); +} + +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx) +{ + const struct loongarch_perf_event *pev; + + pev = &(*loongarch_pmu.general_event_map)[idx]; + + if (pev->event_id == HW_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + return pev; +} + +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct loongarch_perf_event *pev; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pev = &((*loongarch_pmu.cache_event_map) + [cache_type] + [cache_op] + [cache_result]); + + if (pev->event_id == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + return pev; +} + +static int validate_group(struct perf_event *event) +{ + struct cpu_hw_events fake_cpuc; + struct perf_event *sibling, *leader = event->group_leader; + + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); + + if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) + return -EINVAL; + + for_each_sibling_event(sibling, leader) { + if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) + return -EINVAL; + } + + if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) + return -EINVAL; + + return 0; +} + +static void reset_counters(void *arg) +{ + int n; + int counters = loongarch_pmu.num_counters; + + for (n = 0; n < counters; n++) { + loongarch_pmu_write_control(n, 0); + loongarch_pmu.write_counter(n, 0); + } +} + +static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 }, + 
[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 }, +}; + +static const struct loongarch_perf_event loongson_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +PERF_CACHE_MAP_ALL_UNSUPPORTED, +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x8 }, + [C(RESULT_MISS)] = { 0x9 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x8 }, + [C(RESULT_MISS)] = { 0x9 }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0xaa }, + [C(RESULT_MISS)] = { 0xa9 }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x6 }, + [C(RESULT_MISS)] = { 0x7 }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0xc }, + [C(RESULT_MISS)] = { 0xd }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0xc }, + [C(RESULT_MISS)] = { 0xd }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x3b }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x4 }, + [C(RESULT_MISS)] = { 0x3c }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x4 }, + [C(RESULT_MISS)] = { 0x3c }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02 }, + [C(RESULT_MISS)] = { 0x03 }, + }, +}, +}; + +static int __hw_perf_event_init(struct perf_event *event) +{ + int err; + struct hw_perf_event *hwc = &event->hw; + struct perf_event_attr *attr = &event->attr; + const struct loongarch_perf_event *pev; + + /* Returning LoongArch event descriptor for generic perf event. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -EINVAL; + pev = loongarch_pmu_map_general_event(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + pev = loongarch_pmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + /* We are working on the global raw event. */ + mutex_lock(&raw_event_mutex); + pev = loongarch_pmu.map_raw_event(event->attr.config); + } else { + /* The event type is not (yet) supported. */ + return -EOPNOTSUPP; + } + + if (IS_ERR(pev)) { + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + return PTR_ERR(pev); + } + + /* + * We allow max flexibility on how each individual counter shared + * by the single CPU operates (the mode exclusion and the range). + */ + hwc->config_base = CSR_PERFCTRL_IE; + + hwc->event_base = loongarch_pmu_perf_event_encode(pev); + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + + if (!attr->exclude_user) { + hwc->config_base |= CSR_PERFCTRL_PLV3; + hwc->config_base |= CSR_PERFCTRL_PLV2; + } + if (!attr->exclude_kernel) { + hwc->config_base |= CSR_PERFCTRL_PLV0; + } + if (!attr->exclude_hv) { + hwc->config_base |= CSR_PERFCTRL_PLV1; + } + + hwc->config_base &= M_PERFCTL_CONFIG_MASK; + /* + * The event can belong to another cpu. We do not assign a local + * counter for it for now. 
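+ * hwc->idx stays -1 until loongarch_pmu_add() picks a free counter + * on the CPU the event is eventually scheduled on.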
+ */ + hwc->idx = -1; + hwc->config = 0; + + if (!hwc->sample_period) { + hwc->sample_period = loongarch_pmu.max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) + err = validate_group(event); + + event->destroy = hw_perf_event_destroy; + + if (err) + event->destroy(event); + + return err; +} + +static void pause_local_counters(void) +{ + unsigned long flags; + int ctr = loongarch_pmu.num_counters; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + local_irq_save(flags); + do { + ctr--; + cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr); + loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + } while (ctr > 0); + local_irq_restore(flags); +} + +static void resume_local_counters(void) +{ + int ctr = loongarch_pmu.num_counters; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + do { + ctr--; + loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); + } while (ctr > 0); +} + +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config) +{ + raw_event.event_id = M_PERFCTL_EVENT(config); + + return &raw_event; +} + +static int __init init_hw_perf_events(void) +{ + int counters; + + if (!cpu_has_pmp) + return -ENODEV; + + pr_info("Performance counters: "); + counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1; + + loongarch_pmu.num_counters = counters; + loongarch_pmu.max_period = (1ULL << 63) - 1; + loongarch_pmu.valid_count = (1ULL << 63) - 1; + loongarch_pmu.overflow = 1ULL << 63; + loongarch_pmu.name = "loongarch/loongson64"; + loongarch_pmu.read_counter = loongarch_pmu_read_counter; + loongarch_pmu.write_counter = loongarch_pmu_write_counter; + loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event; + loongarch_pmu.general_event_map = &loongson_event_map; + loongarch_pmu.cache_event_map = &loongson_cache_map; + + on_each_cpu(reset_counters, NULL, 1); + + pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n", + loongarch_pmu.name, counters, 64); + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c new file mode 100644 index 000000000..263ac4ab5 --- /dev/null +++ b/arch/loongarch/kernel/perf_regs.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2013 Cavium, Inc. 
+ */ + +#include <linux/perf_event.h> + +#include <asm/ptrace.h> + +#ifdef CONFIG_32BIT +u64 perf_reg_abi(struct task_struct *tsk) +{ + return PERF_SAMPLE_REGS_ABI_32; +} +#else /* Must be CONFIG_64BIT */ +u64 perf_reg_abi(struct task_struct *tsk) +{ + if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS)) + return PERF_SAMPLE_REGS_ABI_32; + else + return PERF_SAMPLE_REGS_ABI_64; +} +#endif /* CONFIG_32BIT */ + +int perf_reg_validate(u64 mask) +{ + if (!mask) + return -EINVAL; + if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1)) + return -EINVAL; + return 0; +} + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX)) + return 0; + + if ((u32)idx == PERF_REG_LOONGARCH_PC) + return regs->csr_era; + + return regs->regs[idx]; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c new file mode 100644 index 000000000..0d82907b5 --- /dev/null +++ b/arch/loongarch/kernel/proc.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <asm/bootinfo.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/idle.h> +#include <asm/processor.h> +#include <asm/time.h> + +/* + * No lock; only written during early bootup by CPU 0. + */ +static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain); + +int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb) +{ + return raw_notifier_chain_register(&proc_cpuinfo_chain, nb); +} + +int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v) +{ + return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v); +} + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned long n = (unsigned long) v - 1; + unsigned int version = cpu_data[n].processor_id & 0xff; + unsigned int fp_version = cpu_data[n].fpu_vers; + struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args; + +#ifdef CONFIG_SMP + if (!cpu_online(n)) + return 0; +#endif + + /* + * For the first processor also print the system type + */ + if (n == 0) + seq_printf(m, "system type\t\t: %s\n\n", get_system_type()); + + seq_printf(m, "processor\t\t: %ld\n", n); + seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); + seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); + seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]); + seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]); + seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); + seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version); + seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n", + cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100); + seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n", + (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ), + ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100); + seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize); + seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n", + cpu_pabits + 1, cpu_vabits + 1); + + seq_printf(m, "ISA\t\t\t:"); + if (cpu_has_loongarch32) + seq_printf(m, " loongarch32"); + if (cpu_has_loongarch64) + seq_printf(m, " loongarch64"); + seq_printf(m, "\n"); + + seq_printf(m, "Features\t\t:"); + if (cpu_has_cpucfg) seq_printf(m, " cpucfg"); + if (cpu_has_lam) seq_printf(m, " lam"); + 
if (cpu_has_ual) seq_printf(m, " ual"); + if (cpu_has_fpu) seq_printf(m, " fpu"); + if (cpu_has_lsx) seq_printf(m, " lsx"); + if (cpu_has_lasx) seq_printf(m, " lasx"); + if (cpu_has_crc32) seq_printf(m, " crc32"); + if (cpu_has_complex) seq_printf(m, " complex"); + if (cpu_has_crypto) seq_printf(m, " crypto"); + if (cpu_has_lvz) seq_printf(m, " lvz"); + if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86"); + if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm"); + if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips"); + seq_printf(m, "\n"); + + seq_printf(m, "Hardware Watchpoint\t: %s", + cpu_has_watch ? "yes, " : "no\n"); + if (cpu_has_watch) { + seq_printf(m, "iwatch count: %d, dwatch count: %d\n", + cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count); + } + + proc_cpuinfo_notifier_args.m = m; + proc_cpuinfo_notifier_args.n = n; + + raw_notifier_call_chain(&proc_cpuinfo_chain, 0, + &proc_cpuinfo_notifier_args); + + seq_printf(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + unsigned long i = *pos; + + return i < nr_cpu_ids ? (void *)(i + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c new file mode 100644 index 000000000..1259bc312 --- /dev/null +++ b/arch/loongarch/kernel/process.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. + * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004 Thiemo Seufer + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/export.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/personality.h> +#include <linux/sys.h> +#include <linux/completion.h> +#include <linux/kallsyms.h> +#include <linux/random.h> +#include <linux/prctl.h> +#include <linux/nmi.h> + +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cpu.h> +#include <asm/elf.h> +#include <asm/fpu.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/loongarch.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/reg.h> +#include <asm/unwind.h> +#include <asm/vdso.h> + +/* + * Idle related variables and functions + */ + +unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; +EXPORT_SYMBOL(boot_option_idle_override); + +#ifdef CONFIG_HOTPLUG_CPU +void arch_cpu_idle_dead(void) +{ + play_dead(); +} +#endif + +asmlinkage void ret_from_fork(void); +asmlinkage void ret_from_kernel_thread(void); + +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + unsigned long crmd; + unsigned long prmd; + unsigned long euen; + + /* New thread loses kernel privileges. 
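+	 * CRMD/PRMD drop to PLV_USER and the FPU is disabled in EUEN, so
+	 * the new image starts at @pc with a clean user context.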
*/ + crmd = regs->csr_crmd & ~(PLV_MASK); + crmd |= PLV_USER; + regs->csr_crmd = crmd; + + prmd = regs->csr_prmd & ~(PLV_MASK); + prmd |= PLV_USER; + regs->csr_prmd = prmd; + + euen = regs->csr_euen & ~(CSR_EUEN_FPEN); + regs->csr_euen = euen; + lose_fpu(0); + current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0; + + clear_thread_flag(TIF_LSX_CTX_LIVE); + clear_thread_flag(TIF_LASX_CTX_LIVE); + clear_used_math(); + regs->csr_era = pc; + regs->regs[3] = sp; +} + +void exit_thread(struct task_struct *tsk) +{ +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * Save any process state which is live in hardware registers to the + * parent context prior to duplication. This prevents the new child + * state becoming stale if the parent is preempted before copy_thread() + * gets a chance to save the parent's live hardware registers to the + * child context. + */ + preempt_disable(); + + if (is_fpu_owner()) + save_fp(current); + + preempt_enable(); + + if (used_math()) + memcpy(dst, src, sizeof(struct task_struct)); + else + memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr)); + + return 0; +} + +/* + * Copy architecture-specific thread state + */ +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long childksp; + unsigned long tls = args->tls; + unsigned long usp = args->stack; + unsigned long clone_flags = args->flags; + struct pt_regs *childregs, *regs = current_pt_regs(); + + childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE; + + /* set up new TSS. */ + childregs = (struct pt_regs *) childksp - 1; + /* Put the stack after the struct pt_regs. */ + childksp = (unsigned long) childregs; + p->thread.sched_cfa = 0; + p->thread.csr_euen = 0; + p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD); + p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD); + p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG); + if (unlikely(args->fn)) { + /* kernel thread */ + p->thread.reg03 = childksp; + p->thread.reg23 = (unsigned long)args->fn; + p->thread.reg24 = (unsigned long)args->fn_arg; + p->thread.reg01 = (unsigned long)ret_from_kernel_thread; + p->thread.sched_ra = (unsigned long)ret_from_kernel_thread; + memset(childregs, 0, sizeof(struct pt_regs)); + childregs->csr_euen = p->thread.csr_euen; + childregs->csr_crmd = p->thread.csr_crmd; + childregs->csr_prmd = p->thread.csr_prmd; + childregs->csr_ecfg = p->thread.csr_ecfg; + goto out; + } + + /* user thread */ + *childregs = *regs; + childregs->regs[4] = 0; /* Child gets zero as return value */ + if (usp) + childregs->regs[3] = usp; + + p->thread.reg03 = (unsigned long) childregs; + p->thread.reg01 = (unsigned long) ret_from_fork; + p->thread.sched_ra = (unsigned long) ret_from_fork; + + /* + * New tasks lose permission to use the fpu. This accelerates context + * switching for most programs since they don't use the fpu. 
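+	 * EUEN is cleared below, so the child's first FPU use traps and
+	 * the FP context is then set up lazily.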
+ */
+	childregs->csr_euen = 0;
+
+	if (clone_flags & CLONE_SETTLS)
+		childregs->regs[2] = tls;
+
+out:
+	clear_tsk_thread_flag(p, TIF_USEDFPU);
+	clear_tsk_thread_flag(p, TIF_USEDSIMD);
+	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+
+	return 0;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+	unsigned long pc = 0;
+	struct unwind_state state;
+
+	if (!try_get_task_stack(task))
+		return 0;
+
+	for (unwind_start(&state, task, NULL);
+	     !unwind_done(&state); unwind_next_frame(&state)) {
+		pc = unwind_get_return_address(&state);
+		if (!pc)
+			break;
+		if (in_sched_functions(pc))
+			continue;
+		break;
+	}
+
+	put_task_stack(task);
+
+	return pc;
+}
+
+bool in_irq_stack(unsigned long stack, struct stack_info *info)
+{
+	unsigned long nextsp;
+	unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
+	unsigned long end = begin + IRQ_STACK_START;
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	nextsp = *(unsigned long *)end;
+	if (nextsp & (SZREG - 1))
+		return false;
+
+	info->begin = begin;
+	info->end = end;
+	info->next_sp = nextsp;
+	info->type = STACK_TYPE_IRQ;
+
+	return true;
+}
+
+bool in_task_stack(unsigned long stack, struct task_struct *task,
+		   struct stack_info *info)
+{
+	unsigned long begin = (unsigned long)task_stack_page(task);
+	unsigned long end = begin + THREAD_SIZE;
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->begin = begin;
+	info->end = end;
+	info->next_sp = 0;
+	info->type = STACK_TYPE_TASK;
+
+	return true;
+}
+
+int get_stack_info(unsigned long stack, struct task_struct *task,
+		   struct stack_info *info)
+{
+	task = task ? : current;
+
+	if (!stack || stack & (SZREG - 1))
+		goto unknown;
+
+	if (in_task_stack(stack, task, info))
+		return 0;
+
+	if (task != current)
+		goto unknown;
+
+	if (in_irq_stack(stack, info))
+		return 0;
+
+unknown:
+	info->type = STACK_TYPE_UNKNOWN;
+	return -EINVAL;
+}
+
+unsigned long stack_top(void)
+{
+	unsigned long top = TASK_SIZE & PAGE_MASK;
+
+	/* Space for the VDSO & data page */
+	top -= PAGE_ALIGN(current->thread.vdso->size);
+	top -= PAGE_SIZE;
+
+	/* Space to randomize the VDSO base */
+	if (current->flags & PF_RANDOMIZE)
+		top -= VDSO_RANDOMIZE_SIZE;
+
+	return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= prandom_u32_max(PAGE_SIZE);
+
+	return sp & STACK_ALIGN;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
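+		 * Reusing it while an IPI may still be in flight could
+		 * corrupt the CSD, so warn and skip that CPU instead.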
+ */ + if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { + pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", + cpu); + continue; + } + + csd = &per_cpu(backtrace_csd, cpu); + csd->func = handle_backtrace; + smp_call_function_single_async(cpu, csd); + } +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); +} + +#ifdef CONFIG_64BIT +void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) +{ + unsigned int i; + + for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) { + uregs[i] = regs->regs[i - LOONGARCH_EF_R0]; + } + + uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0; + uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era; + uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr; + uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd; + uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd; + uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen; + uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg; + uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat; +} +#endif /* CONFIG_64BIT */ diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c new file mode 100644 index 000000000..dc2b82ea8 --- /dev/null +++ b/arch/loongarch/kernel/ptrace.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1992 Ross Biro + * Copyright (C) Linus Torvalds + * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle + * Copyright (C) 1996 David S. Miller + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 1999 MIPS Technologies, Inc. + * Copyright (C) 2000 Ulf Carlsson + */ +#include <linux/kernel.h> +#include <linux/audit.h> +#include <linux/compiler.h> +#include <linux/context_tracking.h> +#include <linux/elf.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/ptrace.h> +#include <linux/regset.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/security.h> +#include <linux/smp.h> +#include <linux/stddef.h> +#include <linux/seccomp.h> +#include <linux/uaccess.h> + +#include <asm/byteorder.h> +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <asm/fpu.h> +#include <asm/loongarch.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/reg.h> +#include <asm/syscall.h> + +static void init_fp_ctx(struct task_struct *target) +{ + /* The target already has context */ + if (tsk_used_math(target)) + return; + + /* Begin with data registers set to all 1s... */ + memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); + set_stopped_child_used_math(target); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. + */ +void ptrace_disable(struct task_struct *child) +{ + /* Don't load the watchpoint registers for the ex-child. 
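+	 * Clearing TIF_LOAD_WATCH below stops the watch registers from
+	 * being loaded for this task; TIF_SINGLESTEP drops any pending
+	 * single-step state.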
*/ + clear_tsk_thread_flag(child, TIF_LOAD_WATCH); + clear_tsk_thread_flag(child, TIF_SINGLESTEP); +} + +/* regset get/set implementations */ + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + int r; + struct pt_regs *regs = task_pt_regs(target); + + r = membuf_write(&to, ®s->regs, sizeof(u64) * GPR_NUM); + r = membuf_write(&to, ®s->orig_a0, sizeof(u64)); + r = membuf_write(&to, ®s->csr_era, sizeof(u64)); + r = membuf_write(&to, ®s->csr_badvaddr, sizeof(u64)); + + return r; +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int err; + int a0_start = sizeof(u64) * GPR_NUM; + int era_start = a0_start + sizeof(u64); + int badvaddr_start = era_start + sizeof(u64); + struct pt_regs *regs = task_pt_regs(target); + + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->regs, + 0, a0_start); + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->orig_a0, + a0_start, a0_start + sizeof(u64)); + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->csr_era, + era_start, era_start + sizeof(u64)); + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->csr_badvaddr, + badvaddr_start, badvaddr_start + sizeof(u64)); + + return err; +} + + +/* + * Get the general floating-point registers. + */ +static int gfpr_get(struct task_struct *target, struct membuf *to) +{ + return membuf_write(to, &target->thread.fpu.fpr, + sizeof(elf_fpreg_t) * NUM_FPU_REGS); +} + +static int gfpr_get_simd(struct task_struct *target, struct membuf *to) +{ + int i, r; + u64 fpr_val; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS; i++) { + fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); + r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t)); + } + + return r; +} + +/* + * Choose the appropriate helper for general registers, and then copy + * the FCC and FCSR registers separately. + */ +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + int r; + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + r = gfpr_get(target, &to); + else + r = gfpr_get_simd(target, &to); + + r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc)); + r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr)); + + return r; +} + +static int gfpr_set(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + return user_regset_copyin(pos, count, kbuf, ubuf, + &target->thread.fpu.fpr, + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); +} + +static int gfpr_set_simd(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + int i, err; + u64 fpr_val; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { + err = user_regset_copyin(pos, count, kbuf, ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); + if (err) + return err; + set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); + } + + return 0; +} + +/* + * Choose the appropriate helper for general registers, and then copy + * the FCC register separately. 
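+ * (FCSR sits right after FCC in the user-visible layout and is
+ * copied the same way below.)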
+ */ +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t); + const int fcsr_start = fcc_start + sizeof(u64); + int err; + + BUG_ON(count % sizeof(elf_fpreg_t)); + if (pos + count > sizeof(elf_fpregset_t)) + return -EIO; + + init_fp_ctx(target); + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = gfpr_set(target, &pos, &count, &kbuf, &ubuf); + else + err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; + + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcc, fcc_start, + fcc_start + sizeof(u64)); + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcsr, fcsr_start, + fcsr_start + sizeof(u32)); + + return err; +} + +static int cfg_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + int i, r; + u32 cfg_val; + + i = 0; + while (to.left > 0) { + cfg_val = read_cpucfg(i++); + r = membuf_write(&to, &cfg_val, sizeof(u32)); + } + + return r; +} + +/* + * CFG registers are read-only. + */ +static int cfg_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return 0; +} + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(r0, regs[0]), + REG_OFFSET_NAME(r1, regs[1]), + REG_OFFSET_NAME(r2, regs[2]), + REG_OFFSET_NAME(r3, regs[3]), + REG_OFFSET_NAME(r4, regs[4]), + REG_OFFSET_NAME(r5, regs[5]), + REG_OFFSET_NAME(r6, regs[6]), + REG_OFFSET_NAME(r7, regs[7]), + REG_OFFSET_NAME(r8, regs[8]), + REG_OFFSET_NAME(r9, regs[9]), + REG_OFFSET_NAME(r10, regs[10]), + REG_OFFSET_NAME(r11, regs[11]), + REG_OFFSET_NAME(r12, regs[12]), + REG_OFFSET_NAME(r13, regs[13]), + REG_OFFSET_NAME(r14, regs[14]), + REG_OFFSET_NAME(r15, regs[15]), + REG_OFFSET_NAME(r16, regs[16]), + REG_OFFSET_NAME(r17, regs[17]), + REG_OFFSET_NAME(r18, regs[18]), + REG_OFFSET_NAME(r19, regs[19]), + REG_OFFSET_NAME(r20, regs[20]), + REG_OFFSET_NAME(r21, regs[21]), + REG_OFFSET_NAME(r22, regs[22]), + REG_OFFSET_NAME(r23, regs[23]), + REG_OFFSET_NAME(r24, regs[24]), + REG_OFFSET_NAME(r25, regs[25]), + REG_OFFSET_NAME(r26, regs[26]), + REG_OFFSET_NAME(r27, regs[27]), + REG_OFFSET_NAME(r28, regs[28]), + REG_OFFSET_NAME(r29, regs[29]), + REG_OFFSET_NAME(r30, regs[30]), + REG_OFFSET_NAME(r31, regs[31]), + REG_OFFSET_NAME(orig_a0, orig_a0), + REG_OFFSET_NAME(csr_era, csr_era), + REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr), + REG_OFFSET_NAME(csr_crmd, csr_crmd), + REG_OFFSET_NAME(csr_prmd, csr_prmd), + REG_OFFSET_NAME(csr_euen, csr_euen), + REG_OFFSET_NAME(csr_ecfg, csr_ecfg), + REG_OFFSET_NAME(csr_estat, csr_estat), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. 
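+ * For example, "csr_era" resolves to offsetof(struct pt_regs, csr_era)
+ * through the regoffset_table above.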
If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +enum loongarch_regset { + REGSET_GPR, + REGSET_FPR, + REGSET_CPUCFG, +}; + +static const struct user_regset loongarch64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set, + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = ELF_NFPREG, + .size = sizeof(elf_fpreg_t), + .align = sizeof(elf_fpreg_t), + .regset_get = fpr_get, + .set = fpr_set, + }, + [REGSET_CPUCFG] = { + .core_note_type = NT_LOONGARCH_CPUCFG, + .n = 64, + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = cfg_get, + .set = cfg_set, + }, +}; + +static const struct user_regset_view user_loongarch64_view = { + .name = "loongarch64", + .e_machine = ELF_ARCH, + .regsets = loongarch64_regsets, + .n = ARRAY_SIZE(loongarch64_regsets), +}; + + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_loongarch64_view; +} + +static inline int read_user(struct task_struct *target, unsigned long addr, + unsigned long __user *data) +{ + unsigned long tmp = 0; + + switch (addr) { + case 0 ... 31: + tmp = task_pt_regs(target)->regs[addr]; + break; + case ARG0: + tmp = task_pt_regs(target)->orig_a0; + break; + case PC: + tmp = task_pt_regs(target)->csr_era; + break; + case BADVADDR: + tmp = task_pt_regs(target)->csr_badvaddr; + break; + default: + return -EIO; + } + + return put_user(tmp, data); +} + +static inline int write_user(struct task_struct *target, unsigned long addr, + unsigned long data) +{ + switch (addr) { + case 0 ... 
31: + task_pt_regs(target)->regs[addr] = data; + break; + case ARG0: + task_pt_regs(target)->orig_a0 = data; + break; + case PC: + task_pt_regs(target)->csr_era = data; + break; + case BADVADDR: + task_pt_regs(target)->csr_badvaddr = data; + break; + default: + return -EIO; + } + + return 0; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int ret; + unsigned long __user *datap = (void __user *) data; + + switch (request) { + case PTRACE_PEEKUSR: + ret = read_user(child, addr, datap); + break; + + case PTRACE_POKEUSR: + ret = write_user(child, addr, data); + break; + + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S new file mode 100644 index 000000000..d13252553 --- /dev/null +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * relocate_kernel.S for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#include <linux/kexec.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/loongarch.h> +#include <asm/stackframe.h> +#include <asm/addrspace.h> + +SYM_CODE_START(relocate_new_kernel) + /* + * a0: EFI boot flag for the new kernel + * a1: Command line pointer for the new kernel + * a2: System table pointer for the new kernel + * a3: Start address to jump to after relocation + * a4: Pointer to the current indirection page entry + */ + move s0, a4 + + /* + * In case of a kdump/crash kernel, the indirection page is not + * populated as the kernel is directly copied to a reserved location + */ + beqz s0, done + +process_entry: + PTR_L s1, s0, 0 + PTR_ADDI s0, s0, SZREG + + /* destination page */ + andi s2, s1, IND_DESTINATION + beqz s2, 1f + li.w t0, ~0x1 + and s3, s1, t0 /* store destination addr in s3 */ + b process_entry + +1: + /* indirection page, update s0 */ + andi s2, s1, IND_INDIRECTION + beqz s2, 1f + li.w t0, ~0x2 + and s0, s1, t0 + b process_entry + +1: + /* done page */ + andi s2, s1, IND_DONE + beqz s2, 1f + b done + +1: + /* source page */ + andi s2, s1, IND_SOURCE + beqz s2, process_entry + li.w t0, ~0x8 + and s1, s1, t0 + li.w s5, (1 << _PAGE_SHIFT) / SZREG + +copy_word: + /* copy page word by word */ + REG_L s4, s1, 0 + REG_S s4, s3, 0 + PTR_ADDI s3, s3, SZREG + PTR_ADDI s1, s1, SZREG + LONG_ADDI s5, s5, -1 + beqz s5, process_entry + b copy_word + b process_entry + +done: + ibar 0 + dbar 0 + + /* + * Jump to the new kernel, + * make sure the values of a0, a1, a2 and a3 are not changed. + */ + jr a3 +SYM_CODE_END(relocate_new_kernel) + +#ifdef CONFIG_SMP +/* + * Other CPUs should wait until code is relocated and + * then start at the entry point from LOONGARCH_IOCSR_MBUF0. 
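+ * A non-zero word in MBUF0 is the "go" signal: the loop below polls
+ * the mailbox and jumps once the new entry PC has been written there.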
+ */ +SYM_CODE_START(kexec_smp_wait) +1: li.w t0, 0x100 /* wait for init loop */ +2: addi.w t0, t0, -1 /* limit mailbox access */ + bnez t0, 2b + li.w t1, LOONGARCH_IOCSR_MBUF0 + iocsrrd.w s0, t1 /* check PC as an indicator */ + beqz s0, 1b + iocsrrd.d s0, t1 /* get PC via mailbox */ + + li.d t0, CACHE_BASE + or s0, s0, t0 /* s0 = TO_CACHE(s0) */ + jr s0 /* jump to initial PC */ +SYM_CODE_END(kexec_smp_wait) +#endif + +relocate_new_kernel_end: + +SYM_DATA_START(relocate_new_kernel_size) + PTR relocate_new_kernel_end - relocate_new_kernel +SYM_DATA_END(relocate_new_kernel_size) diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c new file mode 100644 index 000000000..8c82021eb --- /dev/null +++ b/arch/loongarch/kernel/reset.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/efi.h> +#include <linux/export.h> +#include <linux/pm.h> +#include <linux/types.h> +#include <linux/reboot.h> +#include <linux/delay.h> +#include <linux/console.h> + +#include <acpi/reboot.h> +#include <asm/idle.h> +#include <asm/loongarch.h> + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_halt(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + local_irq_disable(); + clear_csr_ecfg(ECFG0_IM); + + pr_notice("\n\n** You can safely turn off the power now **\n\n"); + console_flush_on_panic(CONSOLE_FLUSH_PENDING); + + while (true) { + __arch_cpu_idle(); + } +} + +void machine_power_off(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_power_off(); +#ifdef CONFIG_EFI + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); +#endif + + while (true) { + __arch_cpu_idle(); + } +} + +void machine_restart(char *command) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); +#ifdef CONFIG_EFI + if (efi_capsule_pending(NULL)) + efi_reboot(REBOOT_WARM, NULL); + else + efi_reboot(REBOOT_COLD, NULL); +#endif + if (!acpi_disabled) + acpi_reboot(); + + while (true) { + __arch_cpu_idle(); + } +} diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c new file mode 100644 index 000000000..ae436def7 --- /dev/null +++ b/arch/loongarch/kernel/setup.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1995 Waldorf Electronics + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle + * Copyright (C) 1996 Stoned Elipot + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. 
Rozycki + */ +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/efi.h> +#include <linux/export.h> +#include <linux/screen_info.h> +#include <linux/memblock.h> +#include <linux/initrd.h> +#include <linux/ioport.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/root_dev.h> +#include <linux/console.h> +#include <linux/pfn.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/device.h> +#include <linux/dma-map-ops.h> +#include <linux/swiotlb.h> + +#include <asm/addrspace.h> +#include <asm/bootinfo.h> +#include <asm/cache.h> +#include <asm/cpu.h> +#include <asm/dma.h> +#include <asm/efi.h> +#include <asm/loongson.h> +#include <asm/numa.h> +#include <asm/pgalloc.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/time.h> + +#define SMBIOS_BIOSSIZE_OFFSET 0x09 +#define SMBIOS_BIOSEXTERN_OFFSET 0x13 +#define SMBIOS_FREQLOW_OFFSET 0x16 +#define SMBIOS_FREQHIGH_OFFSET 0x17 +#define SMBIOS_FREQLOW_MASK 0xFF +#define SMBIOS_CORE_PACKAGE_OFFSET 0x23 +#define LOONGSON_EFI_ENABLE (1 << 3) + +struct screen_info screen_info __section(".data"); + +unsigned long fw_arg0, fw_arg1, fw_arg2; +DEFINE_PER_CPU(unsigned long, kernelsp); +struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly; + +EXPORT_SYMBOL(cpu_data); + +struct loongson_board_info b_info; +static const char dmi_empty_string[] = " "; + +/* + * Setup information + * + * These are initialized so they are in the .data section + */ + +static int num_standard_resources; +static struct resource *standard_resources; + +static struct resource code_resource = { .name = "Kernel code", }; +static struct resource data_resource = { .name = "Kernel data", }; +static struct resource bss_resource = { .name = "Kernel bss", }; + +const char *get_system_type(void) +{ + return "generic-loongson-machine"; +} + +static const char *dmi_string_parse(const struct dmi_header *dm, u8 s) +{ + const u8 *bp = ((u8 *) dm) + dm->length; + + if (s) { + s--; + while (s > 0 && *bp) { + bp += strlen(bp) + 1; + s--; + } + + if (*bp != 0) { + size_t len = strlen(bp)+1; + size_t cmp_len = len > 8 ? 
8 : len; + + if (!memcmp(bp, dmi_empty_string, cmp_len)) + return dmi_empty_string; + + return bp; + } + } + + return ""; +} + +static void __init parse_cpu_table(const struct dmi_header *dm) +{ + long freq_temp = 0; + char *dmi_data = (char *)dm; + + freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) + + ((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK)); + cpu_clock_freq = freq_temp * 1000000; + + loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]); + loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET); + + pr_info("CpuClock = %llu\n", cpu_clock_freq); +} + +static void __init parse_bios_table(const struct dmi_header *dm) +{ + char *dmi_data = (char *)dm; + + b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6; +} + +static void __init find_tokens(const struct dmi_header *dm, void *dummy) +{ + switch (dm->type) { + case 0x0: /* Extern BIOS */ + parse_bios_table(dm); + break; + case 0x4: /* Calling interface */ + parse_cpu_table(dm); + break; + } +} +static void __init smbios_parse(void) +{ + b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR); + b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION); + b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE); + b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR); + b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME); + dmi_walk(find_tokens, NULL); +} + +static int usermem __initdata; + +static int __init early_parse_mem(char *p) +{ + phys_addr_t start, size; + + if (!p) { + pr_err("mem parameter is empty, do nothing\n"); + return -EINVAL; + } + + /* + * If a user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + usermem = 1; + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); + } + start = 0; + size = memparse(p, &p); + if (*p == '@') + start = memparse(p + 1, &p); + else { + pr_err("Invalid format!\n"); + return -EINVAL; + } + + if (!IS_ENABLED(CONFIG_NUMA)) + memblock_add(start, size); + else + memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE); + + return 0; +} +early_param("mem", early_parse_mem); + +static void __init arch_reserve_vmcore(void) +{ +#ifdef CONFIG_PROC_VMCORE + u64 i; + phys_addr_t start, end; + + if (!is_kdump_kernel()) + return; + + if (!elfcorehdr_size) { + for_each_mem_range(i, &start, &end) { + if (elfcorehdr_addr >= start && elfcorehdr_addr < end) { + /* + * Reserve from the elf core header to the end of + * the memory segment, that should all be kdump + * reserved memory. 
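+				 * This fallback only runs when no explicit
+				 * elfcorehdr size was passed in.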
+ */ + elfcorehdr_size = end - elfcorehdr_addr; + break; + } + } + } + + if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { + pr_warn("elfcorehdr is overlapped\n"); + return; + } + + memblock_reserve(elfcorehdr_addr, elfcorehdr_size); + + pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n", + elfcorehdr_size >> 10, elfcorehdr_addr); +#endif +} + +static void __init arch_parse_crashkernel(void) +{ +#ifdef CONFIG_KEXEC + int ret; + unsigned long long start; + unsigned long long total_mem; + unsigned long long crash_base, crash_size; + + total_mem = memblock_phys_mem_size(); + ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); + if (ret < 0 || crash_size <= 0) + return; + + start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size); + if (start != crash_base) { + pr_warn("Invalid memory region reserved for crash kernel\n"); + return; + } + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +#endif +} + +void __init platform_init(void) +{ + arch_reserve_vmcore(); + arch_parse_crashkernel(); + +#ifdef CONFIG_ACPI_TABLE_UPGRADE + acpi_table_upgrade(); +#endif +#ifdef CONFIG_ACPI + acpi_gbl_use_default_register_widths = false; + acpi_boot_table_init(); +#endif + +#ifdef CONFIG_NUMA + init_numa_memory(); +#endif + dmi_setup(); + smbios_parse(); + pr_info("The BIOS Version: %s\n", b_info.bios_version); + + efi_runtime_init(); +} + +static void __init check_kernel_sections_mem(void) +{ + phys_addr_t start = __pa_symbol(&_text); + phys_addr_t size = __pa_symbol(&_end) - start; + + if (!memblock_is_region_memory(start, size)) { + pr_info("Kernel sections are not in the memory maps\n"); + memblock_add(start, size); + } +} + +/* + * arch_mem_init - initialize memory management subsystem + */ +static void __init arch_mem_init(char **cmdline_p) +{ + if (usermem) + pr_info("User-defined physical RAM map overwrite\n"); + + check_kernel_sections_mem(); + + /* + * In order to reduce the possibility of kernel panic when failed to + * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate + * low memory as small as possible before plat_swiotlb_setup(), so + * make sparse_init() using top-down allocation. 
+ */ + memblock_set_bottom_up(false); + sparse_init(); + memblock_set_bottom_up(true); + + swiotlb_init(true, SWIOTLB_VERBOSE); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); + + memblock_dump_all(); + + early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); +} + +static void __init resource_init(void) +{ + long i = 0; + size_t res_size; + struct resource *res; + struct memblock_region *region; + + code_resource.start = __pa_symbol(&_text); + code_resource.end = __pa_symbol(&_etext) - 1; + data_resource.start = __pa_symbol(&_etext); + data_resource.end = __pa_symbol(&_edata) - 1; + bss_resource.start = __pa_symbol(&__bss_start); + bss_resource.end = __pa_symbol(&__bss_stop) - 1; + + num_standard_resources = memblock.memory.cnt; + res_size = num_standard_resources * sizeof(*standard_resources); + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); + + for_each_mem_region(region) { + res = &standard_resources[i++]; + if (!memblock_is_nomap(region)) { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + } else { + res->name = "Reserved"; + res->flags = IORESOURCE_MEM; + res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; + } + + request_resource(&iomem_resource, res); + + /* + * We don't know which RAM region contains kernel data, + * so we try it repeatedly and let the resource manager + * test it. + */ + request_resource(res, &code_resource); + request_resource(res, &data_resource); + request_resource(res, &bss_resource); + } + +#ifdef CONFIG_KEXEC + if (crashk_res.start < crashk_res.end) { + insert_resource(&iomem_resource, &crashk_res); + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", + (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20), + (unsigned long)(crashk_res.start >> 20)); + } +#endif +} + +static int __init reserve_memblock_reserved_regions(void) +{ + u64 i, j; + + for (i = 0; i < num_standard_resources; ++i) { + struct resource *mem = &standard_resources[i]; + phys_addr_t r_start, r_end, mem_size = resource_size(mem); + + if (!memblock_is_region_reserved(mem->start, mem_size)) + continue; + + for_each_reserved_mem_range(j, &r_start, &r_end) { + resource_size_t start, end; + + start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); + end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); + + if (start > mem->end || end < mem->start) + continue; + + reserve_region_with_split(mem, start, end, "Reserved"); + } + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); + +#ifdef CONFIG_SMP +static void __init prefill_possible_map(void) +{ + int i, possible; + + possible = num_processors + disabled_cpus; + if (possible > nr_cpu_ids) + possible = nr_cpu_ids; + + pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n", + possible, max((possible - num_processors), 0)); + + for (i = 0; i < possible; i++) + set_cpu_possible(i, true); + for (; i < NR_CPUS; i++) + set_cpu_possible(i, false); + + set_nr_cpu_ids(possible); +} +#endif + +void __init setup_arch(char **cmdline_p) +{ + cpu_probe(); + *cmdline_p = boot_command_line; + + init_environ(); + efi_init(); + memblock_init(); + pagetable_init(); + parse_early_param(); + reserve_initrd_mem(); + + platform_init(); + arch_mem_init(cmdline_p); + + resource_init(); +#ifdef CONFIG_SMP + plat_smp_setup(); + 
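+	/* prefill_possible_map() uses counts discovered by plat_smp_setup() */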
prefill_possible_map(); +#endif + + paging_init(); +} diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c new file mode 100644 index 000000000..8f5b79863 --- /dev/null +++ b/arch/loongarch/kernel/signal.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994 - 2000 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2014, Imagination Technologies Ltd. + */ +#include <linux/audit.h> +#include <linux/cache.h> +#include <linux/context_tracking.h> +#include <linux/irqflags.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/personality.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/compiler.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +#include <asm/asm.h> +#include <asm/cacheflush.h> +#include <asm/cpu-features.h> +#include <asm/fpu.h> +#include <asm/ucontext.h> +#include <asm/vdso.h> + +#ifdef DEBUG_SIG +# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args) +#else +# define DEBUGP(fmt, args...) +#endif + +/* Make sure we will not lose FPU ownership */ +#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); }) +#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); }) + +/* Assembly functions to move context to/from the FPU */ +extern asmlinkage int +_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); +extern asmlinkage int +_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); + +struct rt_sigframe { + struct siginfo rs_info; + struct ucontext rs_uctx; +}; + +struct _ctx_layout { + struct sctx_info *addr; + unsigned int size; +}; + +struct extctx_layout { + unsigned long size; + unsigned int flags; + struct _ctx_layout fpu; + struct _ctx_layout end; +}; + +static void __user *get_ctx_through_ctxinfo(struct sctx_info *info) +{ + return (void __user *)((char *)info + sizeof(struct sctx_info)); +} + +/* + * Thread saved context copy to/from a signal context presumed to be on the + * user stack, and therefore accessed with appropriate macros from uaccess.h. + */ +static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx) +{ + int i; + int err = 0; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= + __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0), + ®s[i]); + } + err |= __put_user(current->thread.fpu.fcc, fcc); + err |= __put_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + +static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx) +{ + int i; + int err = 0; + u64 fpr_val; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __get_user(fpr_val, ®s[i]); + set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val); + } + err |= __get_user(current->thread.fpu.fcc, fcc); + err |= __get_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + +/* + * Wrappers for the assembly _{save,restore}_fp_context functions. 
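+ * These run while owning the FPU; a fault on the user pointers makes
+ * them return non-zero, which the protected_*() callers below retry.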
+ */ +static int save_hw_fpu_context(struct fpu_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _save_fp_context(regs, fcc, fcsr); +} + +static int restore_hw_fpu_context(struct fpu_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _restore_fp_context(regs, fcc, fcsr); +} + +static int fcsr_pending(unsigned int __user *fcsr) +{ + int err, sig = 0; + unsigned int csr, enabled; + + err = __get_user(csr, fcsr); + enabled = ((csr & FPU_CSR_ALL_E) << 24); + /* + * If the signal handler set some FPU exceptions, clear it and + * send SIGFPE. + */ + if (csr & enabled) { + csr &= ~enabled; + err |= __put_user(csr, fcsr); + sig = SIGFPE; + } + return err ?: sig; +} + +/* + * Helper routines + */ +static int protected_save_fpu_context(struct extctx_layout *extctx) +{ + int err = 0; + struct sctx_info __user *info = extctx->fpu.addr; + struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs; + uint64_t __user *fcc = &fpu_ctx->fcc; + uint32_t __user *fcsr = &fpu_ctx->fcsr; + + while (1) { + lock_fpu_owner(); + if (is_fpu_owner()) + err = save_hw_fpu_context(fpu_ctx); + else + err = copy_fpu_to_sigcontext(fpu_ctx); + unlock_fpu_owner(); + + err |= __put_user(FPU_CTX_MAGIC, &info->magic); + err |= __put_user(extctx->fpu.size, &info->size); + + if (likely(!err)) + break; + /* Touch the FPU context and try again */ + err = __put_user(0, ®s[0]) | + __put_user(0, ®s[31]) | + __put_user(0, fcc) | + __put_user(0, fcsr); + if (err) + return err; /* really bad sigcontext */ + } + + return err; +} + +static int protected_restore_fpu_context(struct extctx_layout *extctx) +{ + int err = 0, sig = 0, tmp __maybe_unused; + struct sctx_info __user *info = extctx->fpu.addr; + struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs; + uint64_t __user *fcc = &fpu_ctx->fcc; + uint32_t __user *fcsr = &fpu_ctx->fcsr; + + err = sig = fcsr_pending(fcsr); + if (err < 0) + return err; + + while (1) { + lock_fpu_owner(); + if (is_fpu_owner()) + err = restore_hw_fpu_context(fpu_ctx); + else + err = copy_fpu_from_sigcontext(fpu_ctx); + unlock_fpu_owner(); + + if (likely(!err)) + break; + /* Touch the FPU context and try again */ + err = __get_user(tmp, ®s[0]) | + __get_user(tmp, ®s[31]) | + __get_user(tmp, fcc) | + __get_user(tmp, fcsr); + if (err) + break; /* really bad sigcontext */ + } + + return err ?: sig; +} + +static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, + struct extctx_layout *extctx) +{ + int i, err = 0; + struct sctx_info __user *info; + + err |= __put_user(regs->csr_era, &sc->sc_pc); + err |= __put_user(extctx->flags, &sc->sc_flags); + + err |= __put_user(0, &sc->sc_regs[0]); + for (i = 1; i < 32; i++) + err |= __put_user(regs->regs[i], &sc->sc_regs[i]); + + if (extctx->fpu.addr) + err |= protected_save_fpu_context(extctx); + + /* Set the "end" magic */ + info = (struct sctx_info *)extctx->end.addr; + err |= __put_user(0, &info->magic); + err |= __put_user(0, &info->size); + + return err; +} + +static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx) +{ + int err = 0; + unsigned int magic, size; + struct sctx_info __user *info = (struct sctx_info __user 
*)&sc->sc_extcontext; + + while(1) { + err |= __get_user(magic, &info->magic); + err |= __get_user(size, &info->size); + if (err) + return err; + + switch (magic) { + case 0: /* END */ + goto done; + + case FPU_CTX_MAGIC: + if (size < (sizeof(struct sctx_info) + + sizeof(struct fpu_context))) + goto invalid; + extctx->fpu.addr = info; + break; + + default: + goto invalid; + } + + info = (struct sctx_info *)((char *)info + size); + } + +done: + return 0; + +invalid: + return -EINVAL; +} + +static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int i, err = 0; + struct extctx_layout extctx; + + memset(&extctx, 0, sizeof(struct extctx_layout)); + + err = __get_user(extctx.flags, &sc->sc_flags); + if (err) + goto bad; + + err = parse_extcontext(sc, &extctx); + if (err) + goto bad; + + conditional_used_math(extctx.flags & SC_USED_FP); + + /* + * The signal handler may have used FPU; give it up if the program + * doesn't want it following sigreturn. + */ + if (!(extctx.flags & SC_USED_FP)) + lose_fpu(0); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + err |= __get_user(regs->csr_era, &sc->sc_pc); + for (i = 1; i < 32; i++) + err |= __get_user(regs->regs[i], &sc->sc_regs[i]); + + if (extctx.fpu.addr) + err |= protected_restore_fpu_context(&extctx); + +bad: + return err; +} + +static unsigned int handle_flags(void) +{ + unsigned int flags = 0; + + flags = used_math() ? SC_USED_FP : 0; + + switch (current->thread.error_code) { + case 1: + flags |= SC_ADDRERR_RD; + break; + case 2: + flags |= SC_ADDRERR_WR; + break; + } + + return flags; +} + +static unsigned long extframe_alloc(struct extctx_layout *extctx, + struct _ctx_layout *layout, + size_t size, unsigned int align, unsigned long base) +{ + unsigned long new_base = base - size; + + new_base = round_down(new_base, (align < 16 ? 16 : align)); + new_base -= sizeof(struct sctx_info); + + layout->addr = (void *)new_base; + layout->size = (unsigned int)(base - new_base); + extctx->size += layout->size; + + return new_base; +} + +static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp) +{ + unsigned long new_sp = sp; + + memset(extctx, 0, sizeof(struct extctx_layout)); + + extctx->flags = handle_flags(); + + /* Grow down, alloc "end" context info first. */ + new_sp -= sizeof(struct sctx_info); + extctx->end.addr = (void *)new_sp; + extctx->end.size = (unsigned int)sizeof(struct sctx_info); + extctx->size += extctx->end.size; + + if (extctx->flags & SC_USED_FP) { + if (cpu_has_fpu) + new_sp = extframe_alloc(extctx, &extctx->fpu, + sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp); + } + + return new_sp; +} + +void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, + struct extctx_layout *extctx) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = regs->regs[3]; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && + !likely(on_sig_stack(sp - sizeof(struct rt_sigframe)))) + return (void __user __force *)(-1UL); + + sp = sigsp(sp, ksig); + sp = round_down(sp, 16); + sp = setup_extcontext(extctx, sp); + sp -= sizeof(struct rt_sigframe); + + if (!IS_ALIGNED(sp, 16)) + BUG(); + + return (void __user *)sp; +} + +/* + * Atomically swap in the new signal mask, and wait for a signal. 
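+ * (sys_rt_sigreturn below undoes what setup_rt_frame() did: it
+ * restores the saved sigmask and the machine context from the
+ * user-stack frame.)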
+ */ + +asmlinkage long sys_rt_sigreturn(void) +{ + int sig; + sigset_t set; + struct pt_regs *regs; + struct rt_sigframe __user *frame; + + regs = current_pt_regs(); + frame = (struct rt_sigframe __user *)regs->regs[3]; + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig); + + regs->regs[0] = 0; /* No syscall restarting */ + if (restore_altstack(&frame->rs_uctx.uc_stack)) + goto badframe; + + return regs->regs[4]; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +static int setup_rt_frame(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) +{ + int err = 0; + struct extctx_layout extctx; + struct rt_sigframe __user *frame; + + frame = get_sigframe(ksig, regs, &extctx); + if (!access_ok(frame, sizeof(*frame) + extctx.size)) + return -EFAULT; + + /* Create siginfo. */ + err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->rs_uctx.uc_flags); + err |= __put_user(NULL, &frame->rs_uctx.uc_link); + err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]); + err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx); + err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set)); + + if (err) + return -EFAULT; + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = pointer to siginfo + * a2 = pointer to ucontext + * + * c0_era point to the signal handler, $r3 (sp) points to + * the struct rt_sigframe. + */ + regs->regs[4] = ksig->sig; + regs->regs[5] = (unsigned long) &frame->rs_info; + regs->regs[6] = (unsigned long) &frame->rs_uctx; + regs->regs[3] = (unsigned long) frame; + regs->regs[1] = (unsigned long) sig_return; + regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler; + + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", + current->comm, current->pid, + frame, regs->csr_era, regs->regs[1]); + + return 0; +} + +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + int ret; + sigset_t *oldset = sigmask_to_save(); + void *vdso = current->mm->context.vdso; + + /* Are we from a system call? */ + if (regs->regs[0]) { + switch (regs->regs[4]) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->regs[4] = -EINTR; + break; + case -ERESTARTSYS: + if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { + regs->regs[4] = -EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + regs->regs[4] = regs->orig_a0; + regs->csr_era -= 4; + } + + regs->regs[0] = 0; /* Don't deal with this again. */ + } + + rseq_signal_deliver(ksig, regs); + + ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset); + + signal_setup_done(ret, ksig, 0); +} + +void arch_do_signal_or_restart(struct pt_regs *regs) +{ + struct ksignal ksig; + + if (get_signal(&ksig)) { + /* Whee! Actually deliver the signal. */ + handle_signal(&ksig, regs); + return; + } + + /* Are we from a system call? */ + if (regs->regs[0]) { + switch (regs->regs[4]) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->regs[4] = regs->orig_a0; + regs->csr_era -= 4; + break; + + case -ERESTART_RESTARTBLOCK: + regs->regs[4] = regs->orig_a0; + regs->regs[11] = __NR_restart_syscall; + regs->csr_era -= 4; + break; + } + regs->regs[0] = 0; /* Don't deal with this again. 
*/ + } + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back + */ + restore_saved_sigmask(); +} diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c new file mode 100644 index 000000000..434bfc1cd --- /dev/null +++ b/arch/loongarch/kernel/smp.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2000, 2001 Kanoj Sarcar + * Copyright (C) 2000, 2001 Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. + * Copyright (C) 2000, 2001, 2003 Broadcom Corporation + */ +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> +#include <linux/smp.h> +#include <linux/threads.h> +#include <linux/export.h> +#include <linux/time.h> +#include <linux/tracepoint.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> + +#include <asm/cpu.h> +#include <asm/idle.h> +#include <asm/loongson.h> +#include <asm/mmu_context.h> +#include <asm/numa.h> +#include <asm/processor.h> +#include <asm/setup.h> +#include <asm/time.h> + +int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ +EXPORT_SYMBOL(__cpu_number_map); + +int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_logical_map); + +/* Number of threads (siblings) per CPU core */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* Representing the threads (siblings) of each logical CPU */ +cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_sibling_map); + +/* Representing the core map of multi-core chips of each logical CPU */ +cpumask_t cpu_core_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_core_map); + +static DECLARE_COMPLETION(cpu_starting); +static DECLARE_COMPLETION(cpu_running); + +/* + * A logcal cpu mask containing only one VPE per core to + * reduce the number of IPIs on large MT systems. + */ +cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_foreign_map); + +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + +/* representing cpus for which core maps can be computed */ +static cpumask_t cpu_core_setup_map; + +struct secondary_data cpuboot_data; +static DEFINE_PER_CPU(int, cpu_state); + +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNCTION, +}; + +static const char *ipi_types[NR_IPI] __tracepoint_string = { + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNCTION] = "Function call interrupts", +}; + +void show_ipi_list(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < NR_IPI; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? 
" " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]); + seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]); + } +} + +/* Send mailbox buffer via Mail_Send */ +static void csr_mail_send(uint64_t data, int cpu, int mailbox) +{ + uint64_t val; + + /* Send high 32 bits */ + val = IOCSR_MBUF_SEND_BLOCKING; + val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT); + val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT); + val |= (data & IOCSR_MBUF_SEND_H32_MASK); + iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND); + + /* Send low 32 bits */ + val = IOCSR_MBUF_SEND_BLOCKING; + val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT); + val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT); + val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT); + iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND); +}; + +static u32 ipi_read_clear(int cpu) +{ + u32 action; + + /* Load the ipi register to figure out what we're supposed to do */ + action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS); + /* Clear the ipi register to clear the interrupt */ + iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR); + smp_mb(); + + return action; +} + +static void ipi_write_action(int cpu, u32 action) +{ + unsigned int irq = 0; + + while ((irq = ffs(action))) { + uint32_t val = IOCSR_IPI_SEND_BLOCKING; + + val |= (irq - 1); + val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); + iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); + action &= ~BIT(irq - 1); + } +} + +void loongson_send_ipi_single(int cpu, unsigned int action) +{ + ipi_write_action(cpu_logical_map(cpu), (u32)action); +} + +void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + ipi_write_action(cpu_logical_map(i), (u32)action); +} + +/* + * This function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ... + */ +void smp_send_reschedule(int cpu) +{ + loongson_send_ipi_single(cpu, SMP_RESCHEDULE); +} +EXPORT_SYMBOL_GPL(smp_send_reschedule); + +irqreturn_t loongson_ipi_interrupt(int irq, void *dev) +{ + unsigned int action; + unsigned int cpu = smp_processor_id(); + + action = ipi_read_clear(cpu_logical_map(cpu)); + + if (action & SMP_RESCHEDULE) { + scheduler_ipi(); + per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++; + } + + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++; + } + + return IRQ_HANDLED; +} + +void __init loongson_smp_setup(void) +{ + cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; + cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; + + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); + pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus); +} + +void __init loongson_prepare_cpus(unsigned int max_cpus) +{ + int i = 0; + + for (i = 0; i < loongson_sysconf.nr_cpus; i++) { + set_cpu_present(i, true); + csr_mail_send(0, __cpu_logical_map[i], 0); + } + + per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; +} + +/* + * Setup the PC, SP, and TP of a secondary processor and start it running! 
+ */ +void loongson_boot_secondary(int cpu, struct task_struct *idle) +{ + unsigned long entry; + + pr_info("Booting CPU#%d...\n", cpu); + + entry = __pa_symbol((unsigned long)&smpboot_entry); + cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle); + cpuboot_data.thread_info = (unsigned long)task_thread_info(idle); + + csr_mail_send(entry, cpu_logical_map(cpu), 0); + + loongson_send_ipi_single(cpu, SMP_BOOT_CPU); +} + +/* + * SMP init and finish on secondary CPUs + */ +void loongson_init_secondary(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | + ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER; + + change_csr_ecfg(ECFG0_IM, imask); + + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); + +#ifdef CONFIG_NUMA + numa_add_cpu(cpu); +#endif + per_cpu(cpu_state, cpu) = CPU_ONLINE; + cpu_data[cpu].core = + cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; + cpu_data[cpu].package = + cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; +} + +void loongson_smp_finish(void) +{ + local_irq_enable(); + iocsr_write64(0, LOONGARCH_IOCSR_MBUF0); + pr_info("CPU#%d finished\n", smp_processor_id()); +} + +#ifdef CONFIG_HOTPLUG_CPU + +int loongson_cpu_disable(void) +{ + unsigned long flags; + unsigned int cpu = smp_processor_id(); + + if (io_master(cpu)) + return -EBUSY; + +#ifdef CONFIG_NUMA + numa_remove_cpu(cpu); +#endif + set_cpu_online(cpu, false); + calculate_cpu_foreign_map(); + local_irq_save(flags); + irq_migrate_all_off_this_cpu(); + clear_csr_ecfg(ECFG0_IM); + local_irq_restore(flags); + local_flush_tlb_all(); + + return 0; +} + +void loongson_cpu_die(unsigned int cpu) +{ + while (per_cpu(cpu_state, cpu) != CPU_DEAD) + cpu_relax(); + + mb(); +} + +void play_dead(void) +{ + register uint64_t addr; + register void (*init_fn)(void); + + idle_task_exit(); + local_irq_enable(); + set_csr_ecfg(ECFGF_IPI); + __this_cpu_write(cpu_state, CPU_DEAD); + + __smp_mb(); + do { + __asm__ __volatile__("idle 0\n\t"); + addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0); + } while (addr == 0); + + init_fn = (void *)TO_CACHE(addr); + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR); + + init_fn(); + unreachable(); +} + +#endif + +/* + * Power management + */ +#ifdef CONFIG_PM + +static int loongson_ipi_suspend(void) +{ + return 0; +} + +static void loongson_ipi_resume(void) +{ + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); +} + +static struct syscore_ops loongson_ipi_syscore_ops = { + .resume = loongson_ipi_resume, + .suspend = loongson_ipi_suspend, +}; + +/* + * Enable boot cpu ipi before enabling nonboot cpus + * during syscore_resume. + */ +static int __init ipi_pm_init(void) +{ + register_syscore_ops(&loongson_ipi_syscore_ops); + return 0; +} + +core_initcall(ipi_pm_init); +#endif + +static inline void set_cpu_sibling_map(int cpu) +{ + int i; + + cpumask_set_cpu(cpu, &cpu_sibling_setup_map); + + if (smp_num_siblings <= 1) + cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); + else { + for_each_cpu(i, &cpu_sibling_setup_map) { + if (cpus_are_siblings(cpu, i)) { + cpumask_set_cpu(i, &cpu_sibling_map[cpu]); + cpumask_set_cpu(cpu, &cpu_sibling_map[i]); + } + } + } +} + +static inline void set_cpu_core_map(int cpu) +{ + int i; + + cpumask_set_cpu(cpu, &cpu_core_setup_map); + + for_each_cpu(i, &cpu_core_setup_map) { + if (cpu_data[cpu].package == cpu_data[i].package) { + cpumask_set_cpu(i, &cpu_core_map[cpu]); + cpumask_set_cpu(cpu, &cpu_core_map[i]); + } + } +} + +/* + * Calculate a new cpu_foreign_map mask whenever a + * new cpu appears or disappears. 
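+ *
+ * [Editorial sketch, not part of the upstream diff] The computation
+ * below keeps exactly one representative CPU per sibling group. With
+ * cpumasks reduced to plain bitmaps (siblings() is a hypothetical
+ * predicate), the representative selection is:
+ *
+ *	static unsigned long pick_reps(int ncpus, int (*siblings)(int, int))
+ *	{
+ *		unsigned long reps = 0;
+ *		int i, k;
+ *
+ *		for (i = 0; i < ncpus; i++) {
+ *			int seen = 0;
+ *
+ *			for (k = 0; k < ncpus; k++)
+ *				if (((reps >> k) & 1) && siblings(i, k))
+ *					seen = 1;	/* core already covered */
+ *			if (!seen)
+ *				reps |= 1UL << i;
+ *		}
+ *		return reps;
+ *	}
+ *
+ * cpu_foreign_map[cpu] is then this representative set with cpu's own
+ * siblings masked out (the cpumask_andnot() below).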
+ */ +void calculate_cpu_foreign_map(void) +{ + int i, k, core_present; + cpumask_t temp_foreign_map; + + /* Re-calculate the mask */ + cpumask_clear(&temp_foreign_map); + for_each_online_cpu(i) { + core_present = 0; + for_each_cpu(k, &temp_foreign_map) + if (cpus_are_siblings(i, k)) + core_present = 1; + if (!core_present) + cpumask_set_cpu(i, &temp_foreign_map); + } + + for_each_online_cpu(i) + cpumask_andnot(&cpu_foreign_map[i], + &temp_foreign_map, &cpu_sibling_map[i]); +} + +/* Preload SMP state for boot cpu */ +void smp_prepare_boot_cpu(void) +{ + unsigned int cpu, node, rr_node; + + set_cpu_possible(0, true); + set_cpu_online(0, true); + set_my_cpu_offset(per_cpu_offset(0)); + + rr_node = first_node(node_online_map); + for_each_possible_cpu(cpu) { + node = early_cpu_to_node(cpu); + + /* + * The mapping between present cpus and nodes has been + * built during MADT and SRAT parsing. + * + * If possible cpus = present cpus here, early_cpu_to_node + * will return valid node. + * + * If possible cpus > present cpus here (e.g. some possible + * cpus will be added by cpu-hotplug later), for possible but + * not present cpus, early_cpu_to_node will return NUMA_NO_NODE, + * and we just map them to online nodes in round-robin way. + * Once hotplugged, new correct mapping will be built for them. + */ + if (node != NUMA_NO_NODE) + set_cpu_numa_node(cpu, node); + else { + set_cpu_numa_node(cpu, rr_node); + rr_node = next_node_in(rr_node, node_online_map); + } + } +} + +/* called from main before smp_init() */ +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + init_new_context(current, &init_mm); + current_thread_info()->cpu = 0; + loongson_prepare_cpus(max_cpus); + set_cpu_sibling_map(0); + set_cpu_core_map(0); + calculate_cpu_foreign_map(); +#ifndef CONFIG_HOTPLUG_CPU + init_cpu_present(cpu_possible_mask); +#endif +} + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + loongson_boot_secondary(cpu, tidle); + + /* Wait for CPU to start and be ready to sync counters */ + if (!wait_for_completion_timeout(&cpu_starting, + msecs_to_jiffies(5000))) { + pr_crit("CPU%u: failed to start\n", cpu); + return -EIO; + } + + /* Wait for CPU to finish startup & mark itself online before return */ + wait_for_completion(&cpu_running); + + return 0; +} + +/* + * First C code run on the secondary CPUs after being started up by + * the master. + */ +asmlinkage void start_secondary(void) +{ + unsigned int cpu; + + sync_counter(); + cpu = raw_smp_processor_id(); + set_my_cpu_offset(per_cpu_offset(cpu)); + rcu_cpu_starting(cpu); + + cpu_probe(); + constant_clockevent_init(); + loongson_init_secondary(); + + set_cpu_sibling_map(cpu); + set_cpu_core_map(cpu); + + notify_cpu_starting(cpu); + + /* Notify boot CPU that we're starting */ + complete(&cpu_starting); + + /* The CPU is running, now mark it online */ + set_cpu_online(cpu, true); + + calculate_cpu_foreign_map(); + + /* + * Notify boot CPU that we're up & online and it can safely return + * from __cpu_up() + */ + complete(&cpu_running); + + /* + * irq will be enabled in loongson_smp_finish(), enabling it too + * early is dangerous. 
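+	 * A stray IPI or timer interrupt taken before then could run
+	 * handlers on a CPU whose per-CPU state is not fully set up.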
+ */ + WARN_ON_ONCE(!irqs_disabled()); + loongson_smp_finish(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +static void stop_this_cpu(void *dummy) +{ + set_cpu_online(smp_processor_id(), false); + calculate_cpu_foreign_map(); + local_irq_disable(); + while (true); +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, NULL, 0); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return 0; +} + +static void flush_tlb_all_ipi(void *info) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + on_each_cpu(flush_tlb_all_ipi, NULL, 1); +} + +static void flush_tlb_mm_ipi(void *mm) +{ + local_flush_tlb_mm((struct mm_struct *)mm); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + if (atomic_read(&mm->mm_users) == 0) + return; /* happens as a result of exit_mmap() */ + + preempt_disable(); + + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) + cpu_context(cpu, mm) = 0; + } + local_flush_tlb_mm(mm); + } + + preempt_enable(); +} + +struct flush_tlb_data { + struct vm_area_struct *vma; + unsigned long addr1; + unsigned long addr2; +}; + +static void flush_tlb_range_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + struct flush_tlb_data fd = { + .vma = vma, + .addr1 = start, + .addr2 = end, + }; + + on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) + cpu_context(cpu, mm) = 0; + } + local_flush_tlb_range(vma, start, end); + } + preempt_enable(); +} + +static void flush_tlb_kernel_range_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_data fd = { + .addr1 = start, + .addr2 = end, + }; + + on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); +} + +static void flush_tlb_page_ipi(void *info) +{ + struct flush_tlb_data *fd = info; + + local_flush_tlb_page(fd->vma, fd->addr1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + preempt_disable(); + if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { + struct flush_tlb_data fd = { + .vma = vma, + .addr1 = page, + }; + + on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1); + } else { + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) + cpu_context(cpu, vma->vm_mm) = 0; + } + local_flush_tlb_page(vma, page); + } + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_page); + +static void flush_tlb_one_ipi(void *info) +{ + unsigned long vaddr = (unsigned long) info; + + local_flush_tlb_one(vaddr); +} + +void flush_tlb_one(unsigned long vaddr) +{ + on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1); +} +EXPORT_SYMBOL(flush_tlb_one); diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c new file mode 100644 index 000000000..3a690f96f --- /dev/null +++ 
b/arch/loongarch/kernel/stacktrace.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stack trace management functions + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/uaccess.h> + +#include <asm/stacktrace.h> +#include <asm/unwind.h> + +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) +{ + unsigned long addr; + struct pt_regs dummyregs; + struct unwind_state state; + + regs = &dummyregs; + + if (task == current) { + regs->regs[3] = (unsigned long)__builtin_frame_address(0); + regs->csr_era = (unsigned long)__builtin_return_address(0); + } else { + regs->regs[3] = thread_saved_fp(task); + regs->csr_era = thread_saved_ra(task); + } + + regs->regs[1] = 0; + for (unwind_start(&state, task, regs); + !unwind_done(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr || !consume_entry(cookie, addr)) + break; + } +} + +static int +copy_stack_frame(unsigned long fp, struct stack_frame *frame) +{ + int ret = 1; + unsigned long err; + unsigned long __user *user_frame_tail; + + user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame)); + if (!access_ok(user_frame_tail, sizeof(*frame))) + return 0; + + pagefault_disable(); + err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame))); + if (err || (unsigned long)user_frame_tail >= frame->fp) + ret = 0; + pagefault_enable(); + + return ret; +} + +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs) +{ + unsigned long fp = regs->regs[22]; + + while (fp && !((unsigned long)fp & 0xf)) { + struct stack_frame frame; + + frame.fp = 0; + frame.ra = 0; + if (!copy_stack_frame(fp, &frame)) + break; + if (!frame.ra) + break; + if (!consume_entry(cookie, frame.ra)) + break; + fp = frame.fp; + } +} diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S new file mode 100644 index 000000000..202a163cb --- /dev/null +++ b/arch/loongarch/kernel/switch.S @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/asm-offsets.h> +#include <asm/loongarch.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> +#include <asm/thread_info.h> + +/* + * task_struct *__switch_to(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) + */ + .align 5 +SYM_FUNC_START(__switch_to) + csrrd t1, LOONGARCH_CSR_PRMD + stptr.d t1, a0, THREAD_CSRPRMD + + cpu_save_nonscratch a0 + stptr.d ra, a0, THREAD_REG01 + stptr.d a3, a0, THREAD_SCHED_RA + stptr.d a4, a0, THREAD_SCHED_CFA + move tp, a2 + cpu_restore_nonscratch a1 + + li.w t0, _THREAD_SIZE + PTR_ADD t0, t0, tp + set_saved_sp t0, t1, t2 + + ldptr.d t1, a1, THREAD_CSRPRMD + csrwr t1, LOONGARCH_CSR_PRMD + + jr ra +SYM_FUNC_END(__switch_to) diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c new file mode 100644 index 000000000..3fc4211db --- /dev/null +++ b/arch/loongarch/kernel/syscall.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/capability.h> +#include <linux/entry-common.h> +#include <linux/errno.h> +#include <linux/linkage.h> +#include 
<linux/syscalls.h> +#include <linux/unistd.h> + +#include <asm/asm.h> +#include <asm/signal.h> +#include <asm/switch_to.h> +#include <asm-generic/syscalls.h> + +#undef __SYSCALL +#define __SYSCALL(nr, call) [nr] = (call), + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, + prot, unsigned long, flags, unsigned long, fd, off_t, offset) +{ + if (offset & ~PAGE_MASK) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); +} + +void *sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls - 1] = sys_ni_syscall, +#include <asm/unistd.h> +}; + +typedef long (*sys_call_fn)(unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, unsigned long); + +void noinstr do_syscall(struct pt_regs *regs) +{ + unsigned long nr; + sys_call_fn syscall_fn; + + nr = regs->regs[11]; + /* Set for syscall restarting */ + if (nr < NR_syscalls) + regs->regs[0] = nr + 1; + + regs->csr_era += 4; + regs->orig_a0 = regs->regs[4]; + regs->regs[4] = -ENOSYS; + + nr = syscall_enter_from_user_mode(regs, nr); + + if (nr < NR_syscalls) { + syscall_fn = sys_call_table[nr]; + regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6], + regs->regs[7], regs->regs[8], regs->regs[9]); + } + + syscall_exit_to_user_mode(regs); +} diff --git a/arch/loongarch/kernel/sysrq.c b/arch/loongarch/kernel/sysrq.c new file mode 100644 index 000000000..366baef72 --- /dev/null +++ b/arch/loongarch/kernel/sysrq.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LoongArch specific sysrq operations. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/sysrq.h> +#include <linux/workqueue.h> + +#include <asm/cpu-features.h> +#include <asm/tlb.h> + +/* + * Dump TLB entries on all CPUs. + */ + +static DEFINE_SPINLOCK(show_lock); + +static void sysrq_tlbdump_single(void *dummy) +{ + unsigned long flags; + + spin_lock_irqsave(&show_lock, flags); + + pr_info("CPU%d:\n", smp_processor_id()); + dump_tlb_regs(); + pr_info("\n"); + dump_tlb_all(); + pr_info("\n"); + + spin_unlock_irqrestore(&show_lock, flags); +} + +#ifdef CONFIG_SMP +static void sysrq_tlbdump_othercpus(struct work_struct *dummy) +{ + smp_call_function(sysrq_tlbdump_single, NULL, 0); +} + +static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus); +#endif + +static void sysrq_handle_tlbdump(int key) +{ + sysrq_tlbdump_single(NULL); +#ifdef CONFIG_SMP + schedule_work(&sysrq_tlbdump); +#endif +} + +static struct sysrq_key_op sysrq_tlbdump_op = { + .handler = sysrq_handle_tlbdump, + .help_msg = "show-tlbs(x)", + .action_msg = "Show TLB entries", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; + +static int __init loongarch_sysrq_init(void) +{ + return register_sysrq_key('x', &sysrq_tlbdump_op); +} +arch_initcall(loongarch_sysrq_init); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c new file mode 100644 index 000000000..150df6e17 --- /dev/null +++ b/arch/loongarch/kernel/time.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common time service routines for LoongArch machines. 
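+ * They are built on the constant-frequency stable counter and the
+ * CSR timer configured through LOONGARCH_CSR_TCFG.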
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/sched_clock.h> +#include <linux/spinlock.h> + +#include <asm/cpu-features.h> +#include <asm/loongarch.h> +#include <asm/time.h> + +u64 cpu_clock_freq; +EXPORT_SYMBOL(cpu_clock_freq); +u64 const_clock_freq; +EXPORT_SYMBOL(const_clock_freq); + +static DEFINE_RAW_SPINLOCK(state_lock); +static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device); + +static void constant_event_handler(struct clock_event_device *dev) +{ +} + +irqreturn_t constant_timer_interrupt(int irq, void *data) +{ + int cpu = smp_processor_id(); + struct clock_event_device *cd; + + /* Clear Timer Interrupt */ + write_csr_tintclear(CSR_TINTCLR_TI); + cd = &per_cpu(constant_clockevent_device, cpu); + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static int constant_set_state_oneshot(struct clock_event_device *evt) +{ + unsigned long timer_config; + + raw_spin_lock(&state_lock); + + timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config |= CSR_TCFG_EN; + timer_config &= ~CSR_TCFG_PERIOD; + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + raw_spin_unlock(&state_lock); + + return 0; +} + +static int constant_set_state_periodic(struct clock_event_device *evt) +{ + unsigned long period; + unsigned long timer_config; + + raw_spin_lock(&state_lock); + + period = const_clock_freq / HZ; + timer_config = period & CSR_TCFG_VAL; + timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN); + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + raw_spin_unlock(&state_lock); + + return 0; +} + +static int constant_set_state_shutdown(struct clock_event_device *evt) +{ + unsigned long timer_config; + + raw_spin_lock(&state_lock); + + timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config &= ~CSR_TCFG_EN; + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + raw_spin_unlock(&state_lock); + + return 0; +} + +static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt) +{ + unsigned long timer_config; + + delta &= CSR_TCFG_VAL; + timer_config = delta | CSR_TCFG_EN; + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + return 0; +} + +static unsigned long __init get_loops_per_jiffy(void) +{ + unsigned long lpj = (unsigned long)const_clock_freq; + + do_div(lpj, HZ); + + return lpj; +} + +static long init_timeval; + +void sync_counter(void) +{ + /* Ensure counter begin at 0 */ + csr_write64(-init_timeval, LOONGARCH_CSR_CNTC); +} + +static int get_timer_irq(void) +{ + struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + + if (d) + return irq_create_mapping(d, EXCCODE_TIMER - EXCCODE_INT_START); + + return -EINVAL; +} + +int constant_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned long min_delta = 0x600; + unsigned long max_delta = (1UL << 48) - 1; + struct clock_event_device *cd; + static int irq = 0, timer_irq_installed = 0; + + if (!timer_irq_installed) { + irq = get_timer_irq(); + if (irq < 0) + pr_err("Failed to map irq %d (timer)\n", irq); + } + + cd = &per_cpu(constant_clockevent_device, cpu); + + cd->name = "Constant"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU; + + cd->irq = irq; + cd->rating = 320; + cd->cpumask = cpumask_of(cpu); + cd->set_state_oneshot = constant_set_state_oneshot; + cd->set_state_oneshot_stopped = 
constant_set_state_shutdown; + cd->set_state_periodic = constant_set_state_periodic; + cd->set_state_shutdown = constant_set_state_shutdown; + cd->set_next_event = constant_timer_next_event; + cd->event_handler = constant_event_handler; + + clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta); + + if (timer_irq_installed) + return 0; + + timer_irq_installed = 1; + + sync_counter(); + + if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL)) + pr_err("Failed to request irq %d (timer)\n", irq); + + lpj_fine = get_loops_per_jiffy(); + pr_info("Constant clock event device register\n"); + + return 0; +} + +static u64 read_const_counter(struct clocksource *clk) +{ + return drdtime(); +} + +static u64 native_sched_clock(void) +{ + return read_const_counter(NULL); +} + +static struct clocksource clocksource_const = { + .name = "Constant", + .rating = 400, + .read = read_const_counter, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .vdso_clock_mode = VDSO_CLOCKMODE_CPU, +}; + +int __init constant_clocksource_init(void) +{ + int res; + unsigned long freq = const_clock_freq; + + res = clocksource_register_hz(&clocksource_const, freq); + + sched_clock_register(native_sched_clock, 64, freq); + + pr_info("Constant clock source device register\n"); + + return res; +} + +void __init time_init(void) +{ + if (!cpu_has_cpucfg) + const_clock_freq = cpu_clock_freq; + else + const_clock_freq = calc_const_freq(); + + init_timeval = drdtime() - csr_read64(LOONGARCH_CSR_CNTC); + + constant_clockevent_init(); + constant_clocksource_init(); +} diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c new file mode 100644 index 000000000..caa7cd859 --- /dev/null +++ b/arch/loongarch/kernel/topology.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/node.h> +#include <linux/nodemask.h> +#include <linux/percpu.h> +#include <asm/bootinfo.h> + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +#ifdef CONFIG_HOTPLUG_CPU +int arch_register_cpu(int cpu) +{ + int ret; + struct cpu *c = &per_cpu(cpu_devices, cpu); + + c->hotpluggable = 1; + ret = register_cpu(c, cpu); + if (ret < 0) + pr_warn("register_cpu %d failed (%d)\n", cpu, ret); + + return ret; +} +EXPORT_SYMBOL(arch_register_cpu); + +void arch_unregister_cpu(int cpu) +{ + struct cpu *c = &per_cpu(cpu_devices, cpu); + + c->hotpluggable = 0; + unregister_cpu(c); +} +EXPORT_SYMBOL(arch_unregister_cpu); +#endif + +static int __init topology_init(void) +{ + int i, ret; + + for_each_present_cpu(i) { + struct cpu *c = &per_cpu(cpu_devices, i); + + c->hotpluggable = !io_master(i); + ret = register_cpu(c, i); + if (ret < 0) + pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret); + } + + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c new file mode 100644 index 000000000..1a4dce84e --- /dev/null +++ b/arch/loongarch/kernel/traps.c @@ -0,0 +1,738 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/context_tracking.h> +#include <linux/entry-common.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/module.h> +#include <linux/extable.h> 
+#include <linux/mm.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/kallsyms.h> +#include <linux/memblock.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/kprobes.h> +#include <linux/notifier.h> +#include <linux/irq.h> +#include <linux/perf_event.h> + +#include <asm/addrspace.h> +#include <asm/bootinfo.h> +#include <asm/branch.h> +#include <asm/break.h> +#include <asm/cpu.h> +#include <asm/fpu.h> +#include <asm/loongarch.h> +#include <asm/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/ptrace.h> +#include <asm/sections.h> +#include <asm/siginfo.h> +#include <asm/stacktrace.h> +#include <asm/tlb.h> +#include <asm/types.h> +#include <asm/unwind.h> + +#include "access-helper.h" + +extern asmlinkage void handle_ade(void); +extern asmlinkage void handle_ale(void); +extern asmlinkage void handle_sys(void); +extern asmlinkage void handle_bp(void); +extern asmlinkage void handle_ri(void); +extern asmlinkage void handle_fpu(void); +extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_lbt(void); +extern asmlinkage void handle_lsx(void); +extern asmlinkage void handle_lasx(void); +extern asmlinkage void handle_reserved(void); +extern asmlinkage void handle_watch(void); +extern asmlinkage void handle_vint(void); + +static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, + const char *loglvl, bool user) +{ + unsigned long addr; + struct unwind_state state; + struct pt_regs *pregs = (struct pt_regs *)regs; + + if (!task) + task = current; + + if (user_mode(regs)) + state.type = UNWINDER_GUESS; + + printk("%sCall Trace:", loglvl); + for (unwind_start(&state, task, pregs); + !unwind_done(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + print_ip_sym(loglvl, addr); + } + printk("%s\n", loglvl); +} + +static void show_stacktrace(struct task_struct *task, + const struct pt_regs *regs, const char *loglvl, bool user) +{ + int i; + const int field = 2 * sizeof(unsigned long); + unsigned long stackdata; + unsigned long *sp = (unsigned long *)regs->regs[3]; + + printk("%sStack :", loglvl); + i = 0; + while ((unsigned long) sp & (PAGE_SIZE - 1)) { + if (i && ((i % (64 / field)) == 0)) { + pr_cont("\n"); + printk("%s ", loglvl); + } + if (i > 39) { + pr_cont(" ..."); + break; + } + + if (__get_addr(&stackdata, sp++, user)) { + pr_cont(" (Bad stack address)"); + break; + } + + pr_cont(" %0*lx", field, stackdata); + i++; + } + pr_cont("\n"); + show_backtrace(task, regs, loglvl, user); +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + struct pt_regs regs; + + regs.csr_crmd = 0; + if (sp) { + regs.csr_era = 0; + regs.regs[1] = 0; + regs.regs[3] = (unsigned long)sp; + } else { + if (!task || task == current) + prepare_frametrace(®s); + else { + regs.csr_era = task->thread.reg01; + regs.regs[1] = 0; + regs.regs[3] = task->thread.reg03; + regs.regs[22] = task->thread.reg22; + } + } + + show_stacktrace(task, ®s, loglvl, false); +} + +static void show_code(unsigned int *pc, bool user) +{ + long i; + unsigned int insn; + + printk("Code:"); + + for(i = -3 ; i < 6 ; i++) { + if (__get_inst(&insn, pc + i, user)) { + pr_cont(" (Bad address in era)\n"); + break; + } + pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' 
':'>')); + } + pr_cont("\n"); +} + +static void __show_regs(const struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int excsubcode; + unsigned int exccode; + int i; + + show_regs_print_info(KERN_DEFAULT); + + /* + * Saved main processor registers + */ + for (i = 0; i < 32; ) { + if ((i % 4) == 0) + printk("$%2d :", i); + pr_cont(" %0*lx", field, regs->regs[i]); + + i++; + if ((i % 4) == 0) + pr_cont("\n"); + } + + /* + * Saved csr registers + */ + printk("era : %0*lx %pS\n", field, regs->csr_era, + (void *) regs->csr_era); + printk("ra : %0*lx %pS\n", field, regs->regs[1], + (void *) regs->regs[1]); + + printk("CSR crmd: %08lx ", regs->csr_crmd); + printk("CSR prmd: %08lx ", regs->csr_prmd); + printk("CSR euen: %08lx ", regs->csr_euen); + printk("CSR ecfg: %08lx ", regs->csr_ecfg); + printk("CSR estat: %08lx ", regs->csr_estat); + + pr_cont("\n"); + + exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT; + printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode); + + if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE) + printk("BadVA : %0*lx\n", field, regs->csr_badvaddr); + + printk("PrId : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0), + cpu_family_string()); +} + +void show_regs(struct pt_regs *regs) +{ + __show_regs((struct pt_regs *)regs); + dump_stack(); +} + +void show_registers(struct pt_regs *regs) +{ + __show_regs(regs); + print_modules(); + printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n", + current->comm, current->pid, current_thread_info(), current); + + show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs)); + show_code((void *)regs->csr_era, user_mode(regs)); + printk("\n"); +} + +static DEFINE_RAW_SPINLOCK(die_lock); + +void __noreturn die(const char *str, struct pt_regs *regs) +{ + static int die_counter; + int sig = SIGSEGV; + + oops_enter(); + + if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, + SIGSEGV) == NOTIFY_STOP) + sig = 0; + + console_verbose(); + raw_spin_lock_irq(&die_lock); + bust_spinlocks(1); + + printk("%s[#%d]:\n", str, ++die_counter); + show_registers(regs); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + raw_spin_unlock_irq(&die_lock); + + oops_exit(); + + if (regs && kexec_should_crash(current)) + crash_kexec(regs); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + make_task_dead(sig); +} + +static inline void setup_vint_size(unsigned int size) +{ + unsigned int vs; + + vs = ilog2(size/4); + + if (vs == 0 || vs > 7) + panic("vint_size %d Not support yet", vs); + + csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG); +} + +/* + * Send SIGFPE according to FCSR Cause bits, which must have already + * been masked against Enable bits. This is important as Inexact can + * happen together with Overflow or Underflow, and `ptrace' can set + * any bits.
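+ *
+ * [Editorial sketch, not part of the upstream diff] The cause bits are
+ * tested below in fixed priority order, so Inexact never hides a more
+ * serious exception. The same priority chain over the standard C
+ * floating-point exception flags:
+ *
+ *	#include <fenv.h>
+ *
+ *	static const char *fp_cause(int excepts)
+ *	{
+ *		if (excepts & FE_INVALID)	return "invalid";
+ *		if (excepts & FE_DIVBYZERO)	return "div-by-zero";
+ *		if (excepts & FE_OVERFLOW)	return "overflow";
+ *		if (excepts & FE_UNDERFLOW)	return "underflow";
+ *		if (excepts & FE_INEXACT)	return "inexact";
+ *		return "none";
+ *	}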
+ */ +void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr, + struct task_struct *tsk) +{ + int si_code = FPE_FLTUNK; + + if (fcsr & FPU_CSR_INV_X) + si_code = FPE_FLTINV; + else if (fcsr & FPU_CSR_DIV_X) + si_code = FPE_FLTDIV; + else if (fcsr & FPU_CSR_OVF_X) + si_code = FPE_FLTOVF; + else if (fcsr & FPU_CSR_UDF_X) + si_code = FPE_FLTUND; + else if (fcsr & FPU_CSR_INE_X) + si_code = FPE_FLTRES; + + force_sig_fault(SIGFPE, si_code, fault_addr); +} + +int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr) +{ + int si_code; + + switch (sig) { + case 0: + return 0; + + case SIGFPE: + force_fcsr_sig(fcsr, fault_addr, current); + return 1; + + case SIGBUS: + force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr); + return 1; + + case SIGSEGV: + mmap_read_lock(current->mm); + if (vma_lookup(current->mm, (unsigned long)fault_addr)) + si_code = SEGV_ACCERR; + else + si_code = SEGV_MAPERR; + mmap_read_unlock(current->mm); + force_sig_fault(SIGSEGV, si_code, fault_addr); + return 1; + + default: + force_sig(sig); + return 1; + } +} + +/* + * Delayed fp exceptions when doing a lazy ctx switch + */ +asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr) +{ + int sig; + void __user *fault_addr; + irqentry_state_t state = irqentry_enter(regs); + + if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr, + SIGFPE) == NOTIFY_STOP) + goto out; + + /* Clear FCSR.Cause before enabling interrupts */ + write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr)); + local_irq_enable(); + + die_if_kernel("FP exception in kernel code", regs); + + sig = SIGFPE; + fault_addr = (void __user *) regs->csr_era; + + /* Send a signal if required. */ + process_fpemu_return(sig, fault_addr, fcsr); + +out: + local_irq_disable(); + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_ade(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + die_if_kernel("Kernel ade access", regs); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr); + + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_ale(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + die_if_kernel("Kernel ale access", regs); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); + + irqentry_exit(regs, state); +} + +#ifdef CONFIG_GENERIC_BUG +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} +#endif /* CONFIG_GENERIC_BUG */ + +static void bug_handler(struct pt_regs *regs) +{ + switch (report_bug(regs->csr_era, regs)) { + case BUG_TRAP_TYPE_BUG: + case BUG_TRAP_TYPE_NONE: + die_if_kernel("Oops - BUG", regs); + force_sig(SIGTRAP); + break; + + case BUG_TRAP_TYPE_WARN: + /* Skip the BUG instruction and continue */ + regs->csr_era += LOONGARCH_INSN_SIZE; + break; + } +} + +asmlinkage void noinstr do_bp(struct pt_regs *regs) +{ + bool user = user_mode(regs); + unsigned int opcode, bcode; + unsigned long era = exception_era(regs); + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + current->thread.trap_nr = read_csr_excode(); + if (__get_inst(&opcode, (u32 *)era, user)) + goto out_sigsegv; + + bcode = (opcode & 0x7fff); + + /* + * notify the kprobe handlers, if instruction is likely to + * pertain to them. 
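+	 * Break codes that no die-chain consumer claims (NOTIFY_STOP)
+	 * fall through to the generic handling in the second switch below.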
+ */ + switch (bcode) { + case BRK_KPROBE_BP: + if (notify_die(DIE_BREAK, "Kprobe", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + case BRK_KPROBE_SSTEPBP: + if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + case BRK_UPROBE_BP: + if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + case BRK_UPROBE_XOLBP: + if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + default: + if (notify_die(DIE_TRAP, "Break", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + } + + switch (bcode) { + case BRK_BUG: + bug_handler(regs); + break; + case BRK_DIVZERO: + die_if_kernel("Break instruction in kernel code", regs); + force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era); + break; + case BRK_OVERFLOW: + die_if_kernel("Break instruction in kernel code", regs); + force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era); + break; + default: + die_if_kernel("Break instruction in kernel code", regs); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era); + break; + } + +out: + local_irq_disable(); + irqentry_exit(regs, state); + return; + +out_sigsegv: + force_sig(SIGSEGV); + goto out; +} + +asmlinkage void noinstr do_watch(struct pt_regs *regs) +{ + pr_warn("Hardware watch point handler not implemented!\n"); +} + +asmlinkage void noinstr do_ri(struct pt_regs *regs) +{ + int status = SIGILL; + unsigned int opcode = 0; + unsigned int __user *era = (unsigned int __user *)exception_era(regs); + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + current->thread.trap_nr = read_csr_excode(); + + if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr, + SIGILL) == NOTIFY_STOP) + goto out; + + die_if_kernel("Reserved instruction in kernel code", regs); + + if (unlikely(get_user(opcode, era) < 0)) { + status = SIGSEGV; + current->thread.error_code = 1; + } + + force_sig(status); + +out: + local_irq_disable(); + irqentry_exit(regs, state); +} + +static void init_restore_fp(void) +{ + if (!used_math()) { + /* First time FP context user. 
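+		 * Initialise a clean FP state for this task
+		 * before handing it the FPU.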
*/ + init_fpu(); + } else { + /* This task has formerly used the FP context */ + if (!is_fpu_owner()) + own_fpu_inatomic(1); + } + + BUG_ON(!is_fp_enabled()); +} + +asmlinkage void noinstr do_fpu(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + die_if_kernel("do_fpu invoked from kernel context!", regs); + + preempt_disable(); + init_restore_fp(); + preempt_enable(); + + local_irq_disable(); + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_lsx(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + force_sig(SIGILL); + local_irq_disable(); + + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_lasx(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + force_sig(SIGILL); + local_irq_disable(); + + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_lbt(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + force_sig(SIGILL); + local_irq_disable(); + + irqentry_exit(regs, state); +} + +asmlinkage void noinstr do_reserved(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + + local_irq_enable(); + /* + * Game over - no way to handle this if it ever occurs. Most probably + * caused by a fatal error after another hardware/software error. + */ + pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n", + read_csr_excode(), current->pid, current->comm); + die_if_kernel("do_reserved exception", regs); + force_sig(SIGUNUSED); + + local_irq_disable(); + + irqentry_exit(regs, state); +} + +asmlinkage void cache_parity_error(void) +{ + /* For the moment, report the problem and hang. */ + pr_err("Cache error exception:\n"); + pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL)); + pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA)); + panic("Can't handle the cache error!"); +} + +asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + irq_enter_rcu(); + old_regs = set_irq_regs(regs); + handle_arch_irq(regs); + set_irq_regs(old_regs); + irq_exit_rcu(); +} + +asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp) +{ + register int cpu; + register unsigned long stack; + irqentry_state_t state = irqentry_enter(regs); + + cpu = smp_processor_id(); + + if (on_irq_stack(cpu, sp)) + handle_loongarch_irq(regs); + else { + stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START; + + /* Save task's sp on IRQ stack for unwinding */ + *(unsigned long *)stack = sp; + + __asm__ __volatile__( + "move $s0, $sp \n" /* Preserve sp */ + "move $sp, %[stk] \n" /* Switch stack */ + "move $a0, %[regs] \n" + "bl handle_loongarch_irq \n" + "move $sp, $s0 \n" /* Restore sp */ + : /* No outputs */ + : [stk] "r" (stack), [regs] "r" (regs) + : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0", + "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", + "memory"); + } + + irqentry_exit(regs, state); +} + +unsigned long eentry; +unsigned long tlbrentry; + +long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K); + +static void configure_exception_vector(void) +{ + eentry = (unsigned long)exception_handlers; + tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE; + + csr_write64(eentry, LOONGARCH_CSR_EENTRY); + csr_write64(eentry, LOONGARCH_CSR_MERRENTRY); + csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY); +} + +void per_cpu_trap_init(int cpu) +{ + 
unsigned int i; + + setup_vint_size(VECSIZE); + + configure_exception_vector(); + + if (!cpu_data[cpu].asid_cache) + cpu_data[cpu].asid_cache = asid_first_version(cpu); + + mmgrab(&init_mm); + current->active_mm = &init_mm; + BUG_ON(current->mm); + enter_lazy_tlb(&init_mm, current); + + /* Initialise exception handlers */ + if (cpu == 0) + for (i = 0; i < 64; i++) + set_handler(i * VECSIZE, handle_reserved, VECSIZE); + + tlb_init(cpu); + cpu_cache_init(); +} + +/* Install CPU exception handler */ +void set_handler(unsigned long offset, void *addr, unsigned long size) +{ + memcpy((void *)(eentry + offset), addr, size); + local_flush_icache_range(eentry + offset, eentry + offset + size); +} + +static const char panic_null_cerr[] = + "Trying to set NULL cache error exception handler\n"; + +/* + * Install uncached CPU exception handler. + * This is suitable only for the cache error exception which is the only + * exception handler that is being run uncached. + */ +void set_merr_handler(unsigned long offset, void *addr, unsigned long size) +{ + unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry)); + + if (!addr) + panic(panic_null_cerr); + + memcpy((void *)(uncached_eentry + offset), addr, size); +} + +void __init trap_init(void) +{ + long i; + + /* Set interrupt vector handler */ + for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++) + set_handler(i * VECSIZE, handle_vint, VECSIZE); + + set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE); + set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE); + set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE); + set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE); + set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE); + set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE); + set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE); + set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE); + set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE); + set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE); + set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE); + set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE); + + cache_error_setup(); + + local_flush_icache_range(eentry, eentry + 0x400); +} diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c new file mode 100644 index 000000000..0c20e5184 --- /dev/null +++ b/arch/loongarch/kernel/unwind_guess.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/kernel.h> + +#include <asm/unwind.h> + +unsigned long unwind_get_return_address(struct unwind_state *state) +{ + if (unwind_done(state)) + return 0; + else if (state->first) + return state->pc; + + return *(unsigned long *)(state->sp); +} +EXPORT_SYMBOL_GPL(unwind_get_return_address); + +void unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs) +{ + memset(state, 0, sizeof(*state)); + + if (regs) { + state->sp = regs->regs[3]; + state->pc = regs->csr_era; + } else if (task && task != current) { + state->sp = thread_saved_fp(task); + state->pc = thread_saved_ra(task); + } else { + state->sp = (unsigned long)__builtin_frame_address(0); + state->pc = (unsigned long)__builtin_return_address(0); + } + + state->task = task; + state->first = true; + + get_stack_info(state->sp, state->task, &state->stack_info); + + if (!unwind_done(state) && !__kernel_text_address(state->pc)) + unwind_next_frame(state); +} +EXPORT_SYMBOL_GPL(unwind_start); + +bool 
unwind_next_frame(struct unwind_state *state) +{ + struct stack_info *info = &state->stack_info; + unsigned long addr; + + if (unwind_done(state)) + return false; + + if (state->first) + state->first = false; + + do { + for (state->sp += sizeof(unsigned long); + state->sp < info->end; + state->sp += sizeof(unsigned long)) { + addr = *(unsigned long *)(state->sp); + + if (__kernel_text_address(addr)) + return true; + } + + state->sp = info->next_sp; + + } while (!get_stack_info(state->sp, state->task, info)); + + return false; +} +EXPORT_SYMBOL_GPL(unwind_next_frame); diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c new file mode 100644 index 000000000..1c5b65756 --- /dev/null +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/kallsyms.h> + +#include <asm/inst.h> +#include <asm/ptrace.h> +#include <asm/unwind.h> + +unsigned long unwind_get_return_address(struct unwind_state *state) +{ + + if (unwind_done(state)) + return 0; + else if (state->type) + return state->pc; + else if (state->first) + return state->pc; + + return *(unsigned long *)(state->sp); + +} +EXPORT_SYMBOL_GPL(unwind_get_return_address); + +static bool unwind_by_guess(struct unwind_state *state) +{ + struct stack_info *info = &state->stack_info; + unsigned long addr; + + for (state->sp += sizeof(unsigned long); + state->sp < info->end; + state->sp += sizeof(unsigned long)) { + addr = *(unsigned long *)(state->sp); + if (__kernel_text_address(addr)) + return true; + } + + return false; +} + +static bool unwind_by_prologue(struct unwind_state *state) +{ + struct stack_info *info = &state->stack_info; + union loongarch_instruction *ip, *ip_end; + long frame_ra = -1; + unsigned long frame_size = 0; + unsigned long size, offset, pc = state->pc; + + if (state->sp >= info->end || state->sp < info->begin) + return false; + + if (!kallsyms_lookup_size_offset(pc, &size, &offset)) + return false; + + ip = (union loongarch_instruction *)(pc - offset); + ip_end = (union loongarch_instruction *)pc; + + while (ip < ip_end) { + if (is_stack_alloc_ins(ip)) { + frame_size = (1 << 12) - ip->reg2i12_format.immediate; + ip++; + break; + } + ip++; + } + + if (!frame_size) { + if (state->first) + goto first; + + return false; + } + + while (ip < ip_end) { + if (is_ra_save_ins(ip)) { + frame_ra = ip->reg2i12_format.immediate; + break; + } + if (is_branch_ins(ip)) + break; + ip++; + } + + if (frame_ra < 0) { + if (state->first) { + state->sp = state->sp + frame_size; + goto first; + } + return false; + } + + if (state->first) + state->first = false; + + state->pc = *(unsigned long *)(state->sp + frame_ra); + state->sp = state->sp + frame_size; + return !!__kernel_text_address(state->pc); + +first: + state->first = false; + if (state->pc == state->ra) + return false; + + state->pc = state->ra; + + return !!__kernel_text_address(state->ra); +} + +void unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs) +{ + memset(state, 0, sizeof(*state)); + state->type = UNWINDER_PROLOGUE; + + if (regs) { + state->sp = regs->regs[3]; + state->pc = regs->csr_era; + state->ra = regs->regs[1]; + if (!__kernel_text_address(state->pc)) + state->type = UNWINDER_GUESS; + } else if (task && task != current) { + state->sp = thread_saved_fp(task); + state->pc = thread_saved_ra(task); + state->ra = 0; + } else { + state->sp = (unsigned 
long)__builtin_frame_address(0); + state->pc = (unsigned long)__builtin_return_address(0); + state->ra = 0; + } + + state->task = task; + state->first = true; + + get_stack_info(state->sp, state->task, &state->stack_info); + + if (!unwind_done(state) && !__kernel_text_address(state->pc)) + unwind_next_frame(state); +} +EXPORT_SYMBOL_GPL(unwind_start); + +bool unwind_next_frame(struct unwind_state *state) +{ + struct stack_info *info = &state->stack_info; + struct pt_regs *regs; + unsigned long pc; + + if (unwind_done(state)) + return false; + + do { + switch (state->type) { + case UNWINDER_GUESS: + state->first = false; + if (unwind_by_guess(state)) + return true; + break; + + case UNWINDER_PROLOGUE: + if (unwind_by_prologue(state)) + return true; + + if (info->type == STACK_TYPE_IRQ && + info->end == state->sp) { + regs = (struct pt_regs *)info->next_sp; + pc = regs->csr_era; + + if (user_mode(regs) || !__kernel_text_address(pc)) + return false; + + state->pc = pc; + state->sp = regs->regs[3]; + state->ra = regs->regs[1]; + state->first = true; + get_stack_info(state->sp, state->task, info); + + return true; + } + } + + state->sp = info->next_sp; + + } while (!get_stack_info(state->sp, state->task, info)); + + return false; +} +EXPORT_SYMBOL_GPL(unwind_next_frame); diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c new file mode 100644 index 000000000..8c9826062 --- /dev/null +++ b/arch/loongarch/kernel/vdso.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/timekeeper_internal.h> + +#include <asm/page.h> +#include <asm/vdso.h> +#include <vdso/helpers.h> +#include <vdso/vsyscall.h> +#include <generated/vdso-offsets.h> + +extern char vdso_start[], vdso_end[]; + +/* Kernel-provided data used by the VDSO. 
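+ *
+ * [Editorial sketch, not part of the upstream diff] The union below pads
+ * the vdso data to an exact page multiple so whole pages can be mapped
+ * read-only into user space. The pattern in isolation (names and sizes
+ * are illustrative):
+ *
+ *	#define DATA_SIZE 4096	/* multiple of the page size */
+ *
+ *	struct payload { long seq; long value; };
+ *
+ *	static union {
+ *		unsigned char page[DATA_SIZE];	/* fixes the object size */
+ *		struct payload data;		/* the real contents */
+ *	} shared __attribute__((aligned(4096)));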
*/ +static union { + u8 page[VDSO_DATA_SIZE]; + struct loongarch_vdso_data vdata; +} loongarch_vdso_data __page_aligned_data; + +static struct page *vdso_pages[] = { NULL }; +struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data; +struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata; + +static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) +{ + current->mm->context.vdso = (void *)(new_vma->vm_start); + + return 0; +} + +struct loongarch_vdso_info vdso_info = { + .vdso = vdso_start, + .size = PAGE_SIZE, + .code_mapping = { + .name = "[vdso]", + .pages = vdso_pages, + .mremap = vdso_mremap, + }, + .data_mapping = { + .name = "[vvar]", + }, + .offset_sigreturn = vdso_offset_sigreturn, +}; + +static int __init init_vdso(void) +{ + unsigned long i, cpu, pfn; + + BUG_ON(!PAGE_ALIGNED(vdso_info.vdso)); + BUG_ON(!PAGE_ALIGNED(vdso_info.size)); + + for_each_possible_cpu(cpu) + vdso_pdata[cpu].node = cpu_to_node(cpu); + + pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso)); + for (i = 0; i < vdso_info.size / PAGE_SIZE; i++) + vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i); + + return 0; +} +subsys_initcall(init_vdso); + +static unsigned long vdso_base(void) +{ + unsigned long base = STACK_TOP; + + if (current->flags & PF_RANDOMIZE) { + base += prandom_u32_max(VDSO_RANDOMIZE_SIZE); + base = PAGE_ALIGN(base); + } + + return base; +} + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long vvar_size, size, data_addr, vdso_addr; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct loongarch_vdso_info *info = current->thread.vdso; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + /* + * Determine total area size. This includes the VDSO data itself + * and the data pages. + */ + vvar_size = VDSO_DATA_SIZE; + size = vvar_size + info->size; + + data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0); + if (IS_ERR_VALUE(data_addr)) { + ret = data_addr; + goto out; + } + vdso_addr = data_addr + VDSO_DATA_SIZE; + + vma = _install_special_mapping(mm, data_addr, vvar_size, + VM_READ | VM_MAYREAD, + &info->data_mapping); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto out; + } + + /* Map VDSO data page. */ + ret = remap_pfn_range(vma, data_addr, + virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT, + vvar_size, PAGE_READONLY); + if (ret) + goto out; + + /* Map VDSO code page. */ + vma = _install_special_mapping(mm, vdso_addr, info->size, + VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + &info->code_mapping); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto out; + } + + mm->context.vdso = (void *)vdso_addr; + ret = 0; + +out: + mmap_write_unlock(mm); + return ret; +} diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S new file mode 100644 index 000000000..b3309a5e6 --- /dev/null +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/sizes.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> + +#define PAGE_SIZE _PAGE_SIZE + +/* + * Put .bss..swapper_pg_dir as the first thing in .bss. This will + * ensure that it has .bss alignment (64K). + */ +#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) + +#include <asm-generic/vmlinux.lds.h> +#include "image-vars.h" + +/* + * Max avaliable Page Size is 64K, so we set SectionAlignment + * field of EFI application to 64K. 
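+ * File contents, by contrast, only need the 512-byte (0x200)
+ * PE/COFF FileAlignment used below.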
+ */ +PECOFF_FILE_ALIGN = 0x200; +PECOFF_SEGMENT_ALIGN = 0x10000; + +OUTPUT_ARCH(loongarch) +ENTRY(kernel_entry) +PHDRS { + text PT_LOAD FLAGS(7); /* RWX */ + note PT_NOTE FLAGS(4); /* R__ */ +} + +jiffies = jiffies_64; + +SECTIONS +{ + . = VMLINUX_LOAD_ADDRESS; + + _text = .; + HEAD_TEXT_SECTION + + . = ALIGN(PECOFF_SEGMENT_ALIGN); + _stext = .; + .text : { + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) + } :text = 0 + . = ALIGN(PECOFF_SEGMENT_ALIGN); + _etext = .; + + EXCEPTION_TABLE(16) + + .got : ALIGN(16) { *(.got) } + .plt : ALIGN(16) { *(.plt) } + .got.plt : ALIGN(16) { *(.got.plt) } + + . = ALIGN(PECOFF_SEGMENT_ALIGN); + __init_begin = .; + __inittext_begin = .; + + INIT_TEXT_SECTION(PAGE_SIZE) + .exit.text : { + EXIT_TEXT + } + + . = ALIGN(PECOFF_SEGMENT_ALIGN); + __inittext_end = .; + + __initdata_begin = .; + + INIT_DATA_SECTION(16) + .exit.data : { + EXIT_DATA + } + +#ifdef CONFIG_SMP + PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT) +#endif + + .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) } + + .init.bss : { + *(.init.bss) + } + . = ALIGN(PECOFF_SEGMENT_ALIGN); + __initdata_end = .; + + __init_end = .; + + _sdata = .; + RO_DATA(4096) + RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE) + + .sdata : { + *(.sdata) + } + .edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGN); } + _edata = .; + + BSS_SECTION(0, SZ_64K, 8) + . = ALIGN(PECOFF_SEGMENT_ALIGN); + + _end = .; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + .gptab.sdata : { + *(.gptab.data) + *(.gptab.sdata) + } + .gptab.sbss : { + *(.gptab.bss) + *(.gptab.sbss) + } + + DISCARDS + /DISCARD/ : { + *(.gnu.attributes) + *(.options) + *(.eh_frame) + } +} diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile new file mode 100644 index 000000000..e36635fcc --- /dev/null +++ b/arch/loongarch/lib/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for LoongArch-specific library files. 
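The script above bumps the location counter with ALIGN(PECOFF_SEGMENT_ALIGN) at every section boundary so the EFI loader's 64K SectionAlignment requirement holds. ALIGN() is plain round-up-to-a-power-of-two arithmetic; a quick check using the two constants the script defines:

#include <stdio.h>

#define PECOFF_FILE_ALIGN    0x200UL
#define PECOFF_SEGMENT_ALIGN 0x10000UL
#define ALIGN_UP(x, a)       (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long dot = 0x123456;	/* an arbitrary location counter */

	/* prints "file: 0x123600, segment: 0x130000" */
	printf("file: %#lx, segment: %#lx\n",
	       ALIGN_UP(dot, PECOFF_FILE_ALIGN),
	       ALIGN_UP(dot, PECOFF_SEGMENT_ALIGN));
	return 0;
}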
+# + +lib-y += delay.o clear_user.o copy_user.o dump_tlb.o diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S new file mode 100644 index 000000000..16ba2b8dd --- /dev/null +++ b/arch/loongarch/lib/clear_user.S @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/export.h> +#include <asm/regdef.h> + +.macro fixup_ex from, to, offset, fix +.if \fix + .section .fixup, "ax" +\to: addi.d a0, a1, \offset + jr ra + .previous +.endif + .section __ex_table, "a" + PTR \from\()b, \to\()b + .previous +.endm + +/* + * unsigned long __clear_user(void *addr, size_t size) + * + * a0: addr + * a1: size + */ +SYM_FUNC_START(__clear_user) + beqz a1, 2f + +1: st.b zero, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, -1 + bgtz a1, 1b + +2: move a0, a1 + jr ra + + fixup_ex 1, 3, 0, 1 +SYM_FUNC_END(__clear_user) + +EXPORT_SYMBOL(__clear_user) diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S new file mode 100644 index 000000000..97d20327a --- /dev/null +++ b/arch/loongarch/lib/copy_user.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/export.h> +#include <asm/regdef.h> + +.macro fixup_ex from, to, offset, fix +.if \fix + .section .fixup, "ax" +\to: addi.d a0, a2, \offset + jr ra + .previous +.endif + .section __ex_table, "a" + PTR \from\()b, \to\()b + .previous +.endm + +/* + * unsigned long __copy_user(void *to, const void *from, size_t n) + * + * a0: to + * a1: from + * a2: n + */ +SYM_FUNC_START(__copy_user) + beqz a2, 3f + +1: ld.b t0, a1, 0 +2: st.b t0, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, 1 + addi.d a2, a2, -1 + bgtz a2, 1b + +3: move a0, a2 + jr ra + + fixup_ex 1, 4, 0, 1 + fixup_ex 2, 4, 0, 0 +SYM_FUNC_END(__copy_user) + +EXPORT_SYMBOL(__copy_user) diff --git a/arch/loongarch/lib/delay.c b/arch/loongarch/lib/delay.c new file mode 100644 index 000000000..831d4761f --- /dev/null +++ b/arch/loongarch/lib/delay.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/smp.h> +#include <linux/timex.h> + +#include <asm/processor.h> + +void __delay(unsigned long cycles) +{ + u64 t0 = get_cycles(); + + while ((unsigned long)(get_cycles() - t0) < cycles) + cpu_relax(); +} +EXPORT_SYMBOL(__delay); + +/* + * Division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. 
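__udelay(), defined just after this comment, is the division-by-multiplication trick in action: the cycles to burn are us * (lpj_fine * HZ) / 10^6, and the divide by 10^6 is replaced with a multiply by 0x10c7 (roughly 2^32/10^6, rounded up) followed by a 32-bit right shift. A stand-alone check of the identity; the HZ and loops-per-jiffy values below are made up for the demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hz = 250, lpj_fine = 4000000;	/* assumed calibration */
	uint64_t us = 123;

	uint64_t exact  = us * lpj_fine * hz / 1000000;
	uint64_t approx = (us * 0x10c7ULL * hz * lpj_fine) >> 32;

	/* 0x10c7 >= 2^32/10^6, so approx only ever overshoots slightly,
	 * which is the safe direction for a delay loop. */
	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}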
This is a "reasonable" implementation, though (and the + * first constant multiplication gets optimized away if the delay is + * a constant) + */ + +void __udelay(unsigned long us) +{ + __delay((us * 0x000010c7ull * HZ * lpj_fine) >> 32); +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long ns) +{ + __delay((ns * 0x00000005ull * HZ * lpj_fine) >> 32); +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c new file mode 100644 index 000000000..c2cc7ce34 --- /dev/null +++ b/arch/loongarch/lib/dump_tlb.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. + * Copyright (C) 1999 by Silicon Graphics, Inc. + */ +#include <linux/kernel.h> +#include <linux/mm.h> + +#include <asm/loongarch.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/tlb.h> + +void dump_tlb_regs(void) +{ + const int field = 2 * sizeof(unsigned long); + + pr_info("Index : 0x%0x\n", read_csr_tlbidx()); + pr_info("PageSize : 0x%0x\n", read_csr_pagesize()); + pr_info("EntryHi : 0x%0*llx\n", field, read_csr_entryhi()); + pr_info("EntryLo0 : 0x%0*llx\n", field, read_csr_entrylo0()); + pr_info("EntryLo1 : 0x%0*llx\n", field, read_csr_entrylo1()); +} + +static void dump_tlb(int first, int last) +{ + unsigned long s_entryhi, entryhi, asid; + unsigned long long entrylo0, entrylo1, pa; + unsigned int index; + unsigned int s_index, s_asid; + unsigned int pagesize, c0, c1, i; + unsigned long asidmask = cpu_asid_mask(&current_cpu_data); + int pwidth = 16; + int vwidth = 16; + int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4); + + s_entryhi = read_csr_entryhi(); + s_index = read_csr_tlbidx(); + s_asid = read_csr_asid(); + + for (i = first; i <= last; i++) { + write_csr_index(i); + tlb_read(); + pagesize = read_csr_pagesize(); + entryhi = read_csr_entryhi(); + entrylo0 = read_csr_entrylo0(); + entrylo1 = read_csr_entrylo1(); + index = read_csr_tlbidx(); + asid = read_csr_asid(); + + /* EHINV bit marks entire entry as invalid */ + if (index & CSR_TLBIDX_EHINV) + continue; + /* + * ASID takes effect in absence of G (global) bit. + */ + if (!((entrylo0 | entrylo1) & ENTRYLO_G) && + asid != s_asid) + continue; + + /* + * Only print entries in use + */ + pr_info("Index: %4d pgsize=0x%x ", i, (1 << pagesize)); + + c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; + c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; + + pr_cont("va=0x%0*lx asid=0x%0*lx", + vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask); + + /* NR/NX are in awkward places, so mask them off separately */ + pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX); + pa = pa & PAGE_MASK; + pr_cont("\n\t["); + pr_cont("nr=%d nx=%d ", + (entrylo0 & ENTRYLO_NR) ? 1 : 0, + (entrylo0 & ENTRYLO_NX) ? 1 : 0); + pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [", + pwidth, pa, c0, + (entrylo0 & ENTRYLO_D) ? 1 : 0, + (entrylo0 & ENTRYLO_V) ? 1 : 0, + (entrylo0 & ENTRYLO_G) ? 1 : 0, + (entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT); + /* NR/NX are in awkward places, so mask them off separately */ + pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX); + pa = pa & PAGE_MASK; + pr_cont("nr=%d nx=%d ", + (entrylo1 & ENTRYLO_NR) ? 1 : 0, + (entrylo1 & ENTRYLO_NX) ? 1 : 0); + pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n", + pwidth, pa, c1, + (entrylo1 & ENTRYLO_D) ? 1 : 0, + (entrylo1 & ENTRYLO_V) ? 1 : 0, + (entrylo1 & ENTRYLO_G) ?
1 : 0, + (entrylo1 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT); + } + pr_info("\n"); + + write_csr_entryhi(s_entryhi); + write_csr_tlbidx(s_index); + write_csr_asid(s_asid); +} + +void dump_tlb_all(void) +{ + dump_tlb(0, current_cpu_data.tlbsize - 1); +} diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile new file mode 100644 index 000000000..8ffc6383f --- /dev/null +++ b/arch/loongarch/mm/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/LoongArch-specific parts of the memory manager. +# + +obj-y += init.o cache.o tlb.o tlbex.o extable.o \ + fault.o ioremap.o maccess.o mmap.o pgtable.o page.o + +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c new file mode 100644 index 000000000..72685a48e --- /dev/null +++ b/arch/loongarch/mm/cache.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2007 MIPS Technologies, Inc. + */ +#include <linux/cacheinfo.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/linkage.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/syscalls.h> + +#include <asm/bootinfo.h> +#include <asm/cacheflush.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/loongarch.h> +#include <asm/numa.h> +#include <asm/processor.h> +#include <asm/setup.h> + +void cache_error_setup(void) +{ + extern char __weak except_vec_cex; + set_merr_handler(0x0, &except_vec_cex, 0x80); +} + +/* + * LoongArch maintains ICache/DCache coherency by hardware, + * we just need "ibar" to avoid instruction hazard here. + */ +void local_flush_icache_range(unsigned long start, unsigned long end) +{ + asm volatile ("\tibar 0\n"::); +} +EXPORT_SYMBOL(local_flush_icache_range); + +static void flush_cache_leaf(unsigned int leaf) +{ + int i, j, nr_nodes; + uint64_t addr = CSR_DMW0_BASE; + struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf; + + nr_nodes = cache_private(cdesc) ? 
1 : loongson_sysconf.nr_nodes; + + do { + for (i = 0; i < cdesc->sets; i++) { + for (j = 0; j < cdesc->ways; j++) { + flush_cache_line(leaf, addr); + addr++; + } + + addr -= cdesc->ways; + addr += cdesc->linesz; + } + addr += (1ULL << NODE_ADDRSPACE_SHIFT); + } while (--nr_nodes > 0); +} + +asmlinkage __visible void __flush_cache_all(void) +{ + int leaf; + struct cache_desc *cdesc = current_cpu_data.cache_leaves; + unsigned int cache_present = current_cpu_data.cache_leaves_present; + + leaf = cache_present - 1; + if (cache_inclusive(cdesc + leaf)) { + flush_cache_leaf(leaf); + return; + } + + for (leaf = 0; leaf < cache_present; leaf++) + flush_cache_leaf(leaf); +} + +#define L1IUPRE (1 << 0) +#define L1IUUNIFY (1 << 1) +#define L1DPRE (1 << 2) + +#define LXIUPRE (1 << 0) +#define LXIUUNIFY (1 << 1) +#define LXIUPRIV (1 << 2) +#define LXIUINCL (1 << 3) +#define LXDPRE (1 << 4) +#define LXDPRIV (1 << 5) +#define LXDINCL (1 << 6) + +#define populate_cache_properties(cfg0, cdesc, level, leaf) \ +do { \ + unsigned int cfg1; \ + \ + cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf); \ + if (level == 1) { \ + cdesc->flags |= CACHE_PRIVATE; \ + } else { \ + if (cfg0 & LXIUPRIV) \ + cdesc->flags |= CACHE_PRIVATE; \ + if (cfg0 & LXIUINCL) \ + cdesc->flags |= CACHE_INCLUSIVE; \ + } \ + cdesc->level = level; \ + cdesc->flags |= CACHE_PRESENT; \ + cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1; \ + cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS); \ + cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \ + cdesc++; leaf++; \ +} while (0) + +void cpu_cache_init(void) +{ + unsigned int leaf = 0, level = 1; + unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16); + struct cache_desc *cdesc = current_cpu_data.cache_leaves; + + if (config & L1IUPRE) { + if (config & L1IUUNIFY) + cdesc->type = CACHE_TYPE_UNIFIED; + else + cdesc->type = CACHE_TYPE_INST; + populate_cache_properties(config, cdesc, level, leaf); + } + + if (config & L1DPRE) { + cdesc->type = CACHE_TYPE_DATA; + populate_cache_properties(config, cdesc, level, leaf); + } + + config = config >> 3; + for (level = 2; level <= CACHE_LEVEL_MAX; level++) { + if (!config) + break; + + if (config & LXIUPRE) { + if (config & LXIUUNIFY) + cdesc->type = CACHE_TYPE_UNIFIED; + else + cdesc->type = CACHE_TYPE_INST; + populate_cache_properties(config, cdesc, level, leaf); + } + + if (config & LXDPRE) { + cdesc->type = CACHE_TYPE_DATA; + populate_cache_properties(config, cdesc, level, leaf); + } + + config = config >> 7; + } + + BUG_ON(leaf > CACHE_LEAVES_MAX); + + current_cpu_data.cache_leaves_present = leaf; + current_cpu_data.options |= LOONGARCH_CPU_PREFETCH; + shm_align_mask = PAGE_SIZE - 1; +} + +static const pgprot_t protection_map[16] = { + [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | 
_PAGE_PRESENT), + [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c new file mode 100644 index 000000000..bc20988f2 --- /dev/null +++ b/arch/loongarch/mm/extable.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/extable.h> +#include <linux/spinlock.h> +#include <asm/branch.h> +#include <linux/uaccess.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(exception_era(regs)); + if (fixup) { + regs->csr_era = fixup->fixup; + + return 1; + } + + return 0; +} diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c new file mode 100644 index 000000000..b829ab911 --- /dev/null +++ b/arch/loongarch/mm/fault.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1995 - 2000 by Ralf Baechle + */ +#include <linux/context_tracking.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/entry-common.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/ratelimit.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kdebug.h> +#include <linux/kprobes.h> +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +#include <asm/branch.h> +#include <asm/mmu_context.h> +#include <asm/ptrace.h> + +int show_unhandled_signals = 1; + +static void __kprobes no_context(struct pt_regs *regs, unsigned long address) +{ + const int field = sizeof(unsigned long) * 2; + + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + + pr_alert("CPU %d Unable to handle kernel paging request at " + "virtual address %0*lx, era == %0*lx, ra == %0*lx\n", + raw_smp_processor_id(), field, address, field, regs->csr_era, + field, regs->regs[1]); + die("Oops", regs); +} + +static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address) +{ + /* + * We ran out of memory, call the OOM killer, and return to userspace + * (which will retry the fault, or kill us if we got oom-killed).
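The protection_map[] table that just closed is indexed by the low four vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED), and that is all DECLARE_VM_GET_PAGE_PROT provides: vm_get_page_prot() returns protection_map[vm_flags & 0xf]. A toy version with the same indexing scheme, using strings in the style of /proc/<pid>/maps instead of pgprot bits:

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* stand-in for vm_get_page_prot(): index a 16-entry table */
static const char *toy_protection_map[16] = {
	[0]                              = "---p",
	[VM_READ]                        = "r--p",
	[VM_WRITE]                       = "-w-p",
	[VM_WRITE | VM_READ]             = "rw-p",
	[VM_EXEC]                        = "--xp",
	[VM_EXEC | VM_READ]              = "r-xp",
	[VM_EXEC | VM_WRITE]             = "-wxp",
	[VM_EXEC | VM_WRITE | VM_READ]   = "rwxp",
	[VM_SHARED]                      = "---s",
	[VM_SHARED | VM_READ]            = "r--s",
	[VM_SHARED | VM_WRITE]           = "-w-s",
	[VM_SHARED | VM_WRITE | VM_READ] = "rw-s",
	[VM_SHARED | VM_EXEC]            = "--xs",
	[VM_SHARED | VM_EXEC | VM_READ]  = "r-xs",
	[VM_SHARED | VM_EXEC | VM_WRITE] = "-wxs",
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "rwxs",
};

int main(void)
{
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;

	printf("%s\n", toy_protection_map[vm_flags & 0xf]);
	return 0;
}

Note how the real table above leaves _PAGE_WRITE out of the private VM_WRITE entries: the first store to such a page faults, which is how copy-on-write and dirty tracking are driven.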
+ */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + pagefault_out_of_memory(); +} + +static void __kprobes do_sigbus(struct pt_regs *regs, + unsigned long write, unsigned long address, int si_code) +{ + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + current->thread.csr_badvaddr = address; + current->thread.trap_nr = read_csr_excode(); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); +} + +static void __kprobes do_sigsegv(struct pt_regs *regs, + unsigned long write, unsigned long address, int si_code) +{ + const int field = sizeof(unsigned long) * 2; + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + + /* User mode accesses just cause a SIGSEGV */ + current->thread.csr_badvaddr = address; + if (!write) + current->thread.error_code = 1; + else + current->thread.error_code = 2; + current->thread.trap_nr = read_csr_excode(); + + if (show_unhandled_signals && + unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) { + pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n", + current->comm, + write ? "write access to" : "read access from", + field, address); + pr_info("era = %0*lx in", field, + (unsigned long) regs->csr_era); + print_vma_addr(KERN_CONT " ", regs->csr_era); + pr_cont("\n"); + pr_info("ra = %0*lx in", field, + (unsigned long) regs->regs[1]); + print_vma_addr(KERN_CONT " ", regs->regs[1]); + pr_cont("\n"); + } + force_sig_fault(SIGSEGV, si_code, (void __user *)address); +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + */ +static void __kprobes __do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + int si_code = SEGV_MAPERR; + unsigned int flags = FAULT_FLAG_DEFAULT; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + struct vm_area_struct *vma = NULL; + vm_fault_t fault; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (address & __UA_LIMIT) { + if (!user_mode(regs)) + no_context(regs, address); + else + do_sigsegv(regs, write, address, si_code); + return; + } + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (faulthandler_disabled() || !mm) { + do_sigsegv(regs, write, address, si_code); + return; + } + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +retry: + vma = lock_mm_and_find_vma(mm, address, regs); + if (unlikely(!vma)) + goto bad_area_nosemaphore; + goto good_area; + +/* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + mmap_read_unlock(mm); +bad_area_nosemaphore: + do_sigsegv(regs, write, address, si_code); + return; + +/* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. 
+ */ +good_area: + si_code = SEGV_ACCERR; + + if (write) { + flags |= FAULT_FLAG_WRITE; + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & VM_READ) && address != exception_era(regs)) + goto bad_area; + if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + no_context(regs, address); + return; + } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + + if (unlikely(fault & VM_FAULT_RETRY)) { + flags |= FAULT_FLAG_TRIED; + + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + goto retry; + } + if (unlikely(fault & VM_FAULT_ERROR)) { + mmap_read_unlock(mm); + if (fault & VM_FAULT_OOM) { + do_out_of_memory(regs, address); + return; + } else if (fault & VM_FAULT_SIGSEGV) { + do_sigsegv(regs, write, address, si_code); + return; + } else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + do_sigbus(regs, write, address, si_code); + return; + } + BUG(); + } + + mmap_read_unlock(mm); +} + +asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + irqentry_state_t state = irqentry_enter(regs); + + /* Enable interrupt if enabled in parent context */ + if (likely(regs->csr_prmd & CSR_PRMD_PIE)) + local_irq_enable(); + + __do_page_fault(regs, write, address); + + local_irq_disable(); + + irqentry_exit(regs, state); +} diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c new file mode 100644 index 000000000..ba138117b --- /dev/null +++ b/arch/loongarch/mm/hugetlbpage.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) + pte = (pte_t *)pmd_alloc(mm, pud, addr); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) + pmd = pmd_offset(pud, addr); + } + } + return (pte_t *) pmd; +} + +/* + * This function checks for proper alignment of input addr and len parameters. 
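The check described by this comment (the function follows just below) is pure mask arithmetic: addr and len are acceptable only if their low HPAGE bits are clear. Assuming the 32MB huge pages that fall out of 16K base pages and a PMD-level huge PTE (HPAGE_SHIFT = 25 is an assumption here, not taken from the source), the predicate reduces to:

#include <stdio.h>

#define HPAGE_SHIFT 25				/* assumed: 32MB huge pages */
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

static int aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	/* both must be multiples of the huge page size */
	return !(addr & ~HPAGE_MASK) && !(len & ~HPAGE_MASK);
}

int main(void)
{
	printf("%d %d\n",
	       aligned_hugepage_range(0x2000000, 0x4000000),	/* ok */
	       aligned_hugepage_range(0x2000000, 0x4001000));	/* bad len */
	return 0;
}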
+ */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +int pmd_huge(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_HUGE) != 0; +} + +int pud_huge(pud_t pud) +{ + return (pud_val(pud) & _PAGE_HUGE) != 0; +} + +uint64_t pmd_to_entrylo(unsigned long pmd_val) +{ + uint64_t val; + /* PMD as PTE. Must be huge page */ + if (!pmd_huge(__pmd(pmd_val))) + panic("%s", __func__); + + val = pmd_val ^ _PAGE_HUGE; + val |= ((val & _PAGE_HGLOBAL) >> + (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)); + + return val; +} diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c new file mode 100644 index 000000000..f42a3be5f --- /dev/null +++ b/arch/loongarch/mm/init.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/export.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/pagemap.h> +#include <linux/memblock.h> +#include <linux/memremap.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/highmem.h> +#include <linux/swap.h> +#include <linux/proc_fs.h> +#include <linux/pfn.h> +#include <linux/hardirq.h> +#include <linux/gfp.h> +#include <linux/initrd.h> +#include <linux/mmzone.h> + +#include <asm/asm-offsets.h> +#include <asm/bootinfo.h> +#include <asm/cpu.h> +#include <asm/dma.h> +#include <asm/mmu_context.h> +#include <asm/sections.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/tlb.h> + +/* + * We have up to 8 empty zeroed pages so we can map one of the right colour + * when needed. Since page is never written to after the initialization we + * don't have to care about aliases on other CPUs. 
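The zero_page_mask computed at the end of setup_zero_pages() below is what lets a ZERO_PAGE(vaddr)-style lookup pick the zero page whose cache colour matches the faulting address. With order 0, as used above, the mask is 0 and there is a single page; the sketch below assumes order 3 (eight coloured pages) purely to make the selection visible, and the base address is pretend:

#include <stdio.h>

#define PAGE_SHIFT 14			/* assume 16K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 3;			/* assumed: 8 coloured pages */
	unsigned long empty_zero_page = 0x90000000UL;	/* pretend base */
	unsigned long zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
	unsigned long vaddr = 0x12345678UL;

	/* the colour bits of vaddr select one of the zeroed pages */
	printf("zero page for %#lx -> %#lx\n", vaddr,
	       empty_zero_page + (vaddr & zero_page_mask));
	return 0;
}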
+ */ +unsigned long empty_zero_page, zero_page_mask; +EXPORT_SYMBOL(empty_zero_page); +EXPORT_SYMBOL(zero_page_mask); + +void setup_zero_pages(void) +{ + unsigned int order, i; + struct page *page; + + order = 0; + + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!empty_zero_page) + panic("Oh boy, that early out of memory?"); + + page = virt_to_page((void *)empty_zero_page); + split_page(page, order); + for (i = 0; i < (1 << order); i++, page++) + mark_page_reserved(page); + + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + void *vfrom, *vto; + + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); + copy_page(vto, vfrom); + kunmap_local(vfrom); + kunmap_local(vto); + /* Make sure this page is cleared on other CPU's too before using it */ + smp_wmb(); +} + +int __ref page_is_ram(unsigned long pfn) +{ + unsigned long addr = PFN_PHYS(pfn); + + return memblock_is_memory(addr) && !memblock_is_reserved(addr); +} + +#ifndef CONFIG_NUMA +void __init paging_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + +#ifdef CONFIG_ZONE_DMA + max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; +#endif +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init(max_zone_pfns); +} + +void __init mem_init(void) +{ + max_mapnr = max_low_pfn; + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + + memblock_free_all(); + setup_zero_pages(); /* Setup zeroed pages. */ +} +#endif /* !CONFIG_NUMA */ + +void __ref free_initmem(void) +{ + free_initmem_default(POISON_FREE_INITMEM); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + struct page *page = pfn_to_page(start_pfn); + + /* With altmap the first mapped page is offset from @start */ + if (altmap) + page += vmem_altmap_offset(altmap); + __remove_pages(start_pfn, nr_pages, altmap); +} + +#ifdef CONFIG_NUMA +int memory_add_physaddr_to_nid(u64 start) +{ + return pa_to_nid(start); +} +EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); +#endif +#endif + +static pte_t *fixmap_pte(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + p4d = p4d_offset(pgd, addr); + + if (pgd_none(*pgd)) { + pud_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pgd_populate(&init_mm, pgd, new); +#ifndef __PAGETABLE_PUD_FOLDED + pud_init((unsigned long)new, (unsigned long)invalid_pmd_table); +#endif + } + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + pmd_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pud_populate(&init_mm, pud, new); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_init((unsigned long)new, (unsigned long)invalid_pte_table); +#endif + } + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + pte_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd, 
new); + } + + return pte_offset_kernel(pmd, addr); +} + +void __init __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *ptep; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + ptep = fixmap_pte(addr); + if (!pte_none(*ptep)) { + pte_ERROR(*ptep); + return; + } + + if (pgprot_val(flags)) + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); + else { + pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + } +} + +/* + * Align swapper_pg_dir in to 64K, allows its address to be loaded + * with a single LUI instruction in the TLB handlers. If we used + * __aligned(64K), its size would get rounded up to the alignment + * size, and waste space. So we place it in its own section and align + * it in the linker script. + */ +pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); + +pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss; +#ifndef __PAGETABLE_PUD_FOLDED +pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pud_table); +#endif +#ifndef __PAGETABLE_PMD_FOLDED +pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pmd_table); +#endif +pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c new file mode 100644 index 000000000..73b0980ab --- /dev/null +++ b/arch/loongarch/mm/ioremap.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/io.h> + +void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size) +{ + return ((void __iomem *)TO_CACHE(phys_addr)); +} + +void __init early_iounmap(void __iomem *addr, unsigned long size) +{ + +} + +void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return early_memremap(phys_addr, size); +} + +void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, + unsigned long prot_val) +{ + return early_memremap(phys_addr, size); +} diff --git a/arch/loongarch/mm/maccess.c b/arch/loongarch/mm/maccess.c new file mode 100644 index 000000000..58173842c --- /dev/null +++ b/arch/loongarch/mm/maccess.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/uaccess.h> +#include <linux/kernel.h> + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + /* highest bit set means kernel space */ + return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1); +} diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c new file mode 100644 index 000000000..fbe1a4856 --- /dev/null +++ b/arch/loongarch/mm/mmap.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/export.h> +#include <linux/io.h> +#include <linux/memblock.h> +#include <linux/mm.h> +#include <linux/mman.h> + +unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ +EXPORT_SYMBOL(shm_align_mask); + +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & shm_align_mask)) + +enum mmap_allocation_direction {UP, DOWN}; + +static unsigned long arch_get_unmapped_area_common(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags, enum mmap_allocation_direction dir) +{ + struct mm_struct *mm = current->mm; + struct 
vm_area_struct *vma; + unsigned long addr = addr0; + int do_color_align; + struct vm_unmapped_area_info info; + + if (unlikely(len > TASK_SIZE)) + return -ENOMEM; + + if (flags & MAP_FIXED) { + /* Even MAP_FIXED mappings must reside within TASK_SIZE */ + if (TASK_SIZE - len < addr) + return -EINVAL; + + /* + * We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) + return -EINVAL; + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + + /* requesting a specific address */ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.length = len; + info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + + if (dir == DOWN) { + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + addr = vm_unmapped_area(&info); + + if (!(addr & ~PAGE_MASK)) + return addr; + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + } + + info.flags = 0; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + return vm_unmapped_area(&info); +} + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + return arch_get_unmapped_area_common(filp, + addr0, len, pgoff, flags, UP); +} + +/* + * There is no need to export this but sched.h declares the function as + * extern so making it static here results in an error. + */ +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return arch_get_unmapped_area_common(filp, + addr0, len, pgoff, flags, DOWN); +} + +int __virt_addr_valid(volatile void *kaddr) +{ + unsigned long vaddr = (unsigned long)kaddr; + + if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base)) + return 0; + + return pfn_valid(PFN_DOWN(PHYSADDR(kaddr))); +} +EXPORT_SYMBOL_GPL(__virt_addr_valid); + +/* + * You really shouldn't be using read() or write() on /dev/mem. This might go + * away in the future. + */ +int valid_phys_addr_range(phys_addr_t addr, size_t size) +{ + /* + * Check whether addr is covered by a memory region without the + * MEMBLOCK_NOMAP attribute, and whether that region covers the + * entire range. In theory, this could lead to false negatives + * if the range is covered by distinct but adjacent memory regions + * that only differ in other attributes. However, few of such + * attributes have been defined, and it is debatable whether it + * follows that /dev/mem read() calls should be able traverse + * such boundaries. + */ + return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr); +} + +/* + * Do not allow /dev/mem mappings beyond the supported physical range. 
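valid_mmap_phys_addr_range(), defined just below this comment, rejects any mapping whose end would cross the CPU's physical-address width: GENMASK_ULL(cpu_pabits, 0) is the highest representable physical address. A user-space rendering of the same test; the pabits and page-shift values are typical assumptions, not read from hardware:

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int cpu_pabits = 47;	/* assumed: 48-bit physical space */
	unsigned int page_shift = 14;	/* assumed: 16K pages */
	unsigned long long pfn = 0x200000ULL, size = 0x10000ULL;

	unsigned long long end = (pfn << page_shift) + size;
	int ok = !(end & ~GENMASK_ULL(cpu_pabits, 0));

	printf("mapping %s\n", ok ? "allowed" : "rejected");
	return 0;
}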
+ */ +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0))); +} diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S new file mode 100644 index 000000000..4c874a7af --- /dev/null +++ b/arch/loongarch/mm/page.S @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/export.h> +#include <asm/page.h> +#include <asm/regdef.h> + + .align 5 +SYM_FUNC_START(clear_page) + lu12i.w t0, 1 << (PAGE_SHIFT - 12) + add.d t0, t0, a0 +1: + st.d zero, a0, 0 + st.d zero, a0, 8 + st.d zero, a0, 16 + st.d zero, a0, 24 + st.d zero, a0, 32 + st.d zero, a0, 40 + st.d zero, a0, 48 + st.d zero, a0, 56 + addi.d a0, a0, 128 + st.d zero, a0, -64 + st.d zero, a0, -56 + st.d zero, a0, -48 + st.d zero, a0, -40 + st.d zero, a0, -32 + st.d zero, a0, -24 + st.d zero, a0, -16 + st.d zero, a0, -8 + bne t0, a0, 1b + + jr ra +SYM_FUNC_END(clear_page) +EXPORT_SYMBOL(clear_page) + +.align 5 +SYM_FUNC_START(copy_page) + lu12i.w t8, 1 << (PAGE_SHIFT - 12) + add.d t8, t8, a0 +1: + ld.d t0, a1, 0 + ld.d t1, a1, 8 + ld.d t2, a1, 16 + ld.d t3, a1, 24 + ld.d t4, a1, 32 + ld.d t5, a1, 40 + ld.d t6, a1, 48 + ld.d t7, a1, 56 + + st.d t0, a0, 0 + st.d t1, a0, 8 + ld.d t0, a1, 64 + ld.d t1, a1, 72 + st.d t2, a0, 16 + st.d t3, a0, 24 + ld.d t2, a1, 80 + ld.d t3, a1, 88 + st.d t4, a0, 32 + st.d t5, a0, 40 + ld.d t4, a1, 96 + ld.d t5, a1, 104 + st.d t6, a0, 48 + st.d t7, a0, 56 + ld.d t6, a1, 112 + ld.d t7, a1, 120 + addi.d a0, a0, 128 + addi.d a1, a1, 128 + + st.d t0, a0, -64 + st.d t1, a0, -56 + st.d t2, a0, -48 + st.d t3, a0, -40 + st.d t4, a0, -32 + st.d t5, a0, -24 + st.d t6, a0, -16 + st.d t7, a0, -8 + + bne t8, a0, 1b + jr ra +SYM_FUNC_END(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c new file mode 100644 index 000000000..ee179ccd3 --- /dev/null +++ b/arch/loongarch/mm/pgtable.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_page(GFP_KERNEL); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(pgd_alloc); + +void pgd_init(unsigned long page) +{ + unsigned long *p, *end; + unsigned long entry; + +#if !defined(__PAGETABLE_PUD_FOLDED) + entry = (unsigned long)invalid_pud_table; +#elif !defined(__PAGETABLE_PMD_FOLDED) + entry = (unsigned long)invalid_pmd_table; +#else + entry = (unsigned long)invalid_pte_table; +#endif + + p = (unsigned long *) page; + end = p + PTRS_PER_PGD; + + do { + p[0] = entry; + p[1] = entry; + p[2] = entry; + p[3] = entry; + p[4] = entry; + p += 8; + p[-3] = entry; + p[-2] = entry; + p[-1] = entry; + } while (p != end); +} +EXPORT_SYMBOL_GPL(pgd_init); + +#ifndef __PAGETABLE_PMD_FOLDED +void pmd_init(unsigned long addr, unsigned long pagetable) +{ + unsigned long *p, *end; + + p = (unsigned long *) addr; + end = p + PTRS_PER_PMD; + + do { + p[0] = pagetable; + p[1] = pagetable; + p[2] = pagetable; + p[3] = 
pagetable; + p[4] = pagetable; + p += 8; + p[-3] = pagetable; + p[-2] = pagetable; + p[-1] = pagetable; + } while (p != end); +} +EXPORT_SYMBOL_GPL(pmd_init); +#endif + +#ifndef __PAGETABLE_PUD_FOLDED +void pud_init(unsigned long addr, unsigned long pagetable) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PUD; + + do { + p[0] = pagetable; + p[1] = pagetable; + p[2] = pagetable; + p[3] = pagetable; + p[4] = pagetable; + p += 8; + p[-3] = pagetable; + p[-2] = pagetable; + p[-1] = pagetable; + } while (p != end); +} +#endif + +pmd_t mk_pmd(struct page *page, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); + + return pmd; +} + +void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; + flush_tlb_all(); +} + +void __init pagetable_init(void) +{ + /* Initialize the entire pgd. */ + pgd_init((unsigned long)swapper_pg_dir); + pgd_init((unsigned long)invalid_pg_dir); +#ifndef __PAGETABLE_PUD_FOLDED + pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table); +#endif +#ifndef __PAGETABLE_PMD_FOLDED + pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); +#endif +} diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c new file mode 100644 index 000000000..da3681f13 --- /dev/null +++ b/arch/loongarch/mm/tlb.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/export.h> + +#include <asm/cpu.h> +#include <asm/bootinfo.h> +#include <asm/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/tlb.h> + +void local_flush_tlb_all(void) +{ + invtlb_all(INVTLB_CURRENT_ALL, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_all); + +void local_flush_tlb_user(void) +{ + invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_user); + +void local_flush_tlb_kernel(void) +{ + invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_kernel); + +/* + * All entries common to a mm share an asid. To effectively flush + * these entries, we just bump the asid. + */ +void local_flush_tlb_mm(struct mm_struct *mm) +{ + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + + if (asid_valid(mm, cpu)) + drop_mmu_context(mm, cpu); + else + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + + preempt_enable(); +} + +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int cpu = smp_processor_id(); + + if (asid_valid(mm, cpu)) { + unsigned long size, flags; + + local_irq_save(flags); + start = round_down(start, PAGE_SIZE << 1); + end = round_up(end, PAGE_SIZE << 1); + size = (end - start) >> (PAGE_SHIFT + 1); + if (size <= (current_cpu_data.tlbsizestlbsets ? 
+ current_cpu_data.tlbsize / 8 : + current_cpu_data.tlbsize / 2)) { + int asid = cpu_asid(cpu, mm); + + while (start < end) { + invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start); + start += (PAGE_SIZE << 1); + } + } else { + drop_mmu_context(mm, cpu); + } + local_irq_restore(flags); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + } +} + +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long size, flags; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + if (size <= (current_cpu_data.tlbsizestlbsets ? + current_cpu_data.tlbsize / 8 : + current_cpu_data.tlbsize / 2)) { + + start &= (PAGE_MASK << 1); + end += ((PAGE_SIZE << 1) - 1); + end &= (PAGE_MASK << 1); + + while (start < end) { + invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start); + start += (PAGE_SIZE << 1); + } + } else { + local_flush_tlb_kernel(); + } + local_irq_restore(flags); +} + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + int cpu = smp_processor_id(); + + if (asid_valid(vma->vm_mm, cpu)) { + int newpid; + + newpid = cpu_asid(cpu, vma->vm_mm); + page &= (PAGE_MASK << 1); + invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm)); + } +} + +/* + * This one is only used for pages with the global bit set so we don't care + * much about the ASID. + */ +void local_flush_tlb_one(unsigned long page) +{ + page &= (PAGE_MASK << 1); + invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page); +} + +static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ +#ifdef CONFIG_HUGETLB_PAGE + int idx; + unsigned long lo; + unsigned long flags; + + local_irq_save(flags); + + address &= (PAGE_MASK << 1); + write_csr_entryhi(address); + tlb_probe(); + idx = read_csr_tlbidx(); + write_csr_pagesize(PS_HUGE_SIZE); + lo = pmd_to_entrylo(pte_val(*ptep)); + write_csr_entrylo0(lo); + write_csr_entrylo1(lo + (HPAGE_SIZE >> 1)); + + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + write_csr_pagesize(PS_DEFAULT_SIZE); + + local_irq_restore(flags); +#endif +} + +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + int idx; + unsigned long flags; + + /* + * Handle debugger faulting in for debuggee.
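One LoongArch TLB entry covers two consecutive PTEs (entrylo0 and entrylo1), which is why __update_tlb(), continuing below, first rounds ptep down to the even member of the pair: with 8-byte PTEs, bit 3 of the pointer distinguishes the odd slot from the even one. The pointer arithmetic in isolation, as a user-space sketch (the alignment attribute matters, since the kernel gets it for free from page-table alignment):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

int main(void)
{
	/* page tables are naturally aligned; force that property here */
	_Alignas(16) pte_t table[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	pte_t *ptep = &table[3];	/* an odd-numbered PTE */

	/* same test as the kernel: odd/even parity via pointer bit 3 */
	if ((uintptr_t)ptep & sizeof(pte_t))
		ptep--;			/* back up to the even slot */

	printf("lo=%#llx hi=%#llx\n",
	       (unsigned long long)ptep[0], (unsigned long long)ptep[1]);
	return 0;
}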
+ */ + if (current->active_mm != vma->vm_mm) + return; + + if (pte_val(*ptep) & _PAGE_HUGE) { + __update_hugetlb(vma, address, ptep); + return; + } + + local_irq_save(flags); + + if ((unsigned long)ptep & sizeof(pte_t)) + ptep--; + + address &= (PAGE_MASK << 1); + write_csr_entryhi(address); + tlb_probe(); + idx = read_csr_tlbidx(); + write_csr_pagesize(PS_DEFAULT_SIZE); + write_csr_entrylo0(pte_val(*ptep++)); + write_csr_entrylo1(pte_val(*ptep)); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + + local_irq_restore(flags); +} + +static void setup_ptwalker(void) +{ + unsigned long pwctl0, pwctl1; + unsigned long pgd_i = 0, pgd_w = 0; + unsigned long pud_i = 0, pud_w = 0; + unsigned long pmd_i = 0, pmd_w = 0; + unsigned long pte_i = 0, pte_w = 0; + + pgd_i = PGDIR_SHIFT; + pgd_w = PAGE_SHIFT - 3; +#if CONFIG_PGTABLE_LEVELS > 3 + pud_i = PUD_SHIFT; + pud_w = PAGE_SHIFT - 3; +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + pmd_i = PMD_SHIFT; + pmd_w = PAGE_SHIFT - 3; +#endif + pte_i = PAGE_SHIFT; + pte_w = PAGE_SHIFT - 3; + + pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25; + pwctl1 = pgd_i | pgd_w << 6; + + csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0); + csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1); + csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); + csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID); +} + +static void output_pgtable_bits_defines(void) +{ +#define pr_define(fmt, ...) \ + pr_debug("#define " fmt, ##__VA_ARGS__) + + pr_debug("#include <asm/asm.h>\n"); + pr_debug("#include <asm/regdef.h>\n"); + pr_debug("\n"); + + pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); + pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); + pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); + pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); + pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); + pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); + pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); + pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); + pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); + pr_debug("\n"); +} + +#ifdef CONFIG_NUMA +static unsigned long pcpu_handlers[NR_CPUS]; +#endif +extern long exception_handlers[VECSIZE * 128 / sizeof(long)]; + +void setup_tlb_handler(int cpu) +{ + setup_ptwalker(); + local_flush_tlb_all(); + + /* The tlb handlers are generated only once */ + if (cpu == 0) { + memcpy((void *)tlbrentry, handle_tlb_refill, 0x80); + local_flush_icache_range(tlbrentry, tlbrentry + 0x80); + set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE); + set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE); + set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE); + set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE); + set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE); + set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE); + set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE); + } +#ifdef CONFIG_NUMA + else { + void *addr; + struct page *page; + const int vec_sz = sizeof(exception_handlers); + + if (pcpu_handlers[cpu]) + return; + + page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz)); + if (!page) + return; + + addr = page_address(page); + pcpu_handlers[cpu] = (unsigned long)addr; + memcpy((void *)addr, (void *)eentry, vec_sz); + local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz); + 
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY); + csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY); + csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY); + } +#endif +} + +void tlb_init(int cpu) +{ + write_csr_pagesize(PS_DEFAULT_SIZE); + write_csr_stlbpgsize(PS_DEFAULT_SIZE); + write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE); + + setup_tlb_handler(cpu); + output_pgtable_bits_defines(); +} diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S new file mode 100644 index 000000000..58781c6e4 --- /dev/null +++ b/arch/loongarch/mm/tlbex.S @@ -0,0 +1,505 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <asm/asm.h> +#include <asm/export.h> +#include <asm/loongarch.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> + +#define INVTLB_ADDR_GFALSE_AND_ASID 5 + +#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3) + + .macro tlb_do_page_fault, write + SYM_FUNC_START(tlb_do_page_fault_\write) + SAVE_ALL + csrrd a2, LOONGARCH_CSR_BADV + move a0, sp + REG_S a2, sp, PT_BVADDR + li.w a1, \write + la.abs t0, do_page_fault + jirl ra, t0, 0 + RESTORE_ALL_AND_RET + SYM_FUNC_END(tlb_do_page_fault_\write) + .endm + + tlb_do_page_fault 0 + tlb_do_page_fault 1 + +SYM_FUNC_START(handle_tlb_protect) + BACKUP_T0T1 + SAVE_ALL + move a0, sp + move a1, zero + csrrd a2, LOONGARCH_CSR_BADV + REG_S a2, sp, PT_BVADDR + la.abs t0, do_page_fault + jirl ra, t0, 0 + RESTORE_ALL_AND_RET +SYM_FUNC_END(handle_tlb_protect) + +SYM_FUNC_START(handle_tlb_load) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath. + */ + csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_load + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_load: + /* Get PGD offset in bytes */ + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 +#if CONFIG_PGTABLE_LEVELS > 3 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_load + + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 + +#ifdef CONFIG_SMP +smp_pgtable_change_load: + ll.d t0, t1, 0 +#else + ld.d t0, t1, 0 +#endif + andi ra, t0, _PAGE_PRESENT + beqz ra, nopage_tlb_load + + ori t0, t0, _PAGE_VALID +#ifdef CONFIG_SMP + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_load +#else + st.d t0, t1, 0 +#endif + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +#ifdef CONFIG_64BIT +vmalloc_load: + la.abs t1, swapper_pg_dir + b vmalloc_done_load +#endif + + /* This is the entry point of a huge page. 
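Each bstrpick.d/alsl.d pair in the walk above extracts one level's index bits from the bad virtual address and scales them into the table: index = (badv >> SHIFT) & (PTRS_PER_X - 1), then alsl.d adds index * 8 to the table pointer. The same extraction in C, with illustrative 16K-page, three-level shifts (the real values come from the kernel configuration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  14			/* assumed 16K pages */
#define PTRS_BITS   (PAGE_SHIFT - 3)	/* 2048 entries per table */
#define PMD_SHIFT   (PAGE_SHIFT + PTRS_BITS)
#define PGDIR_SHIFT (PMD_SHIFT + PTRS_BITS)

/* what one "bstrpick.d ra, badv, hi, lo" computes */
static uint64_t level_index(uint64_t badv, unsigned int shift)
{
	return (badv >> shift) & ((1ULL << PTRS_BITS) - 1);
}

int main(void)
{
	uint64_t badv = 0x0000007fdeadc000ULL;

	printf("pgd=%llu pmd=%llu pte=%llu\n",
	       (unsigned long long)level_index(badv, PGDIR_SHIFT),
	       (unsigned long long)level_index(badv, PMD_SHIFT),
	       (unsigned long long)level_index(badv, PAGE_SHIFT));
	return 0;
}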
*/ +tlb_huge_update_load: +#ifdef CONFIG_SMP + ll.d ra, t1, 0 +#endif + andi t0, ra, _PAGE_PRESENT + beqz t0, nopage_tlb_load + +#ifdef CONFIG_SMP + ori t0, ra, _PAGE_VALID + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_load + ori t0, ra, _PAGE_VALID +#else + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, _PAGE_VALID + st.d t0, t1, 0 +#endif + csrrd ra, LOONGARCH_CSR_ASID + csrrd t1, LOONGARCH_CSR_BADV + andi ra, ra, CSR_ASID_ASID + invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1 + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the size + * of the large TLB entry we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. + */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 + + /* Convert to entrylo1 */ + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbfill + + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +nopage_tlb_load: + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_0 + jr t0 +SYM_FUNC_END(handle_tlb_load) + +SYM_FUNC_START(handle_tlb_store) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath. + */ + csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_store + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_store: + /* Get PGD offset in bytes */ + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 +#if CONFIG_PGTABLE_LEVELS > 3 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing.
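The huge-page conversion shown above in the load path is worth seeing numerically: entrylo0 takes the huge PTE with the HUGE bit cleared and HGLOBAL moved down into the normal GLOBAL position, and entrylo1 is the same value plus half the huge page, so the two half-size TLB entries tile the whole region. A sketch with assumed bit positions and an assumed 32MB huge page (none of these constants are taken from the headers):

#include <stdint.h>
#include <stdio.h>

#define HPAGE_SHIFT   25		/* assumed 32MB huge page */
#define _PAGE_HUGE    (1ULL << 6)	/* assumed: HUGE reuses the... */
#define _PAGE_GLOBAL  (1ULL << 6)	/* ...GLOBAL slot in a huge PTE */
#define _PAGE_HGLOBAL (1ULL << 12)	/* assumed HGLOBAL position */

int main(void)
{
	uint64_t pte = 0x9000000001000000ULL | _PAGE_HUGE | _PAGE_HGLOBAL;

	uint64_t lo0 = pte ^ _PAGE_HUGE;		/* clear HUGE bit */
	lo0 |= (lo0 & _PAGE_HGLOBAL) >> (12 - 6);	/* HGLOBAL -> GLOBAL */
	uint64_t lo1 = lo0 + (1ULL << (HPAGE_SHIFT - 1)); /* second half */

	printf("entrylo0=%#llx entrylo1=%#llx\n",
	       (unsigned long long)lo0, (unsigned long long)lo1);
	return 0;
}

The "+ (1 << (HPAGE_SHIFT - 1))" step mirrors the slli.d/add.d pair labelled "Convert to entrylo1" above.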
+ */ + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_store + + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 + +#ifdef CONFIG_SMP +smp_pgtable_change_store: + ll.d t0, t1, 0 +#else + ld.d t0, t1, 0 +#endif + andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE + xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE + bnez ra, nopage_tlb_store + + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#ifdef CONFIG_SMP + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_store +#else + st.d t0, t1, 0 +#endif + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +#ifdef CONFIG_64BIT +vmalloc_store: + la.abs t1, swapper_pg_dir + b vmalloc_done_store +#endif + + /* This is the entry point of a huge page. */ +tlb_huge_update_store: +#ifdef CONFIG_SMP + ll.d ra, t1, 0 +#endif + andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE + xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE + bnez t0, nopage_tlb_store + +#ifdef CONFIG_SMP + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_store + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 +#endif + csrrd ra, LOONGARCH_CSR_ASID + csrrd t1, LOONGARCH_CSR_BADV + andi ra, ra, CSR_ASID_ASID + invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1 + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the size + * of the large TLB entry we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. + */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 + + /* Convert to entrylo1 */ + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbfill + + /* Reset default page size */ + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +nopage_tlb_store: + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jr t0 +SYM_FUNC_END(handle_tlb_store) + +SYM_FUNC_START(handle_tlb_modify) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath.
+ csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_modify + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_modify: + /* Get PGD offset in bytes */ + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 +#if CONFIG_PGTABLE_LEVELS > 3 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_modify + + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 + +#ifdef CONFIG_SMP +smp_pgtable_change_modify: + ll.d t0, t1, 0 +#else + ld.d t0, t1, 0 +#endif + andi ra, t0, _PAGE_WRITE + beqz ra, nopage_tlb_modify + + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#ifdef CONFIG_SMP + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_modify +#else + st.d t0, t1, 0 +#endif + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +#ifdef CONFIG_64BIT +vmalloc_modify: + la.abs t1, swapper_pg_dir + b vmalloc_done_modify +#endif + + /* This is the entry point of a huge page. */ +tlb_huge_update_modify: +#ifdef CONFIG_SMP + ll.d ra, t1, 0 +#endif + andi t0, ra, _PAGE_WRITE + beqz t0, nopage_tlb_modify + +#ifdef CONFIG_SMP + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_modify + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 +#endif + csrrd ra, LOONGARCH_CSR_ASID + csrrd t1, LOONGARCH_CSR_BADV + andi ra, ra, CSR_ASID_ASID + invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1 + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * size of the large TLB entry we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space.
+ */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 + + /* Convert to entrylo1 */ + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbfill + + /* Reset default page size */ + addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + +nopage_tlb_modify: + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jr t0 +SYM_FUNC_END(handle_tlb_modify) + +SYM_FUNC_START(handle_tlb_refill) + csrwr t0, LOONGARCH_CSR_TLBRSAVE + csrrd t0, LOONGARCH_CSR_PGD + lddir t0, t0, 3 +#if CONFIG_PGTABLE_LEVELS > 3 + lddir t0, t0, 2 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + lddir t0, t0, 1 +#endif + ldpte t0, 0 + ldpte t0, 1 + tlbfill + csrrd t0, LOONGARCH_CSR_TLBRSAVE + ertn +SYM_FUNC_END(handle_tlb_refill) diff --git a/arch/loongarch/net/Makefile b/arch/loongarch/net/Makefile new file mode 100644 index 000000000..1ec12a0c3 --- /dev/null +++ b/arch/loongarch/net/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for arch/loongarch/net +# +# Copyright (C) 2022 Loongson Technology Corporation Limited +# +obj-$(CONFIG_BPF_JIT) += bpf_jit.o diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c new file mode 100644 index 000000000..4e86441e6 --- /dev/null +++ b/arch/loongarch/net/bpf_jit.c @@ -0,0 +1,1173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * BPF JIT compiler for LoongArch + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include "bpf_jit.h" + +#define REG_TCC LOONGARCH_GPR_A6 +#define TCC_SAVED LOONGARCH_GPR_S5 + +#define SAVE_RA BIT(0) +#define SAVE_TCC BIT(1) + +static const int regmap[] = { + /* return value from in-kernel function, and exit value for eBPF program */ + [BPF_REG_0] = LOONGARCH_GPR_A5, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = LOONGARCH_GPR_A0, + [BPF_REG_2] = LOONGARCH_GPR_A1, + [BPF_REG_3] = LOONGARCH_GPR_A2, + [BPF_REG_4] = LOONGARCH_GPR_A3, + [BPF_REG_5] = LOONGARCH_GPR_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = LOONGARCH_GPR_S0, + [BPF_REG_7] = LOONGARCH_GPR_S1, + [BPF_REG_8] = LOONGARCH_GPR_S2, + [BPF_REG_9] = LOONGARCH_GPR_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = LOONGARCH_GPR_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = LOONGARCH_GPR_T0, +}; + +static void mark_call(struct jit_ctx *ctx) +{ + ctx->flags |= SAVE_RA; +} + +static void mark_tail_call(struct jit_ctx *ctx) +{ + ctx->flags |= SAVE_TCC; +} + +static bool seen_call(struct jit_ctx *ctx) +{ + return (ctx->flags & SAVE_RA); +} + +static bool seen_tail_call(struct jit_ctx *ctx) +{ + return (ctx->flags & SAVE_TCC); +} + +static u8 tail_call_reg(struct jit_ctx *ctx) +{ + if (seen_call(ctx)) + return TCC_SAVED; + + return REG_TCC; +} + +/* + * eBPF prog stack layout: + * + * high + * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP + * | $ra | + * 
+-------------------------+ + * | $fp | + * +-------------------------+ + * | $s0 | + * +-------------------------+ + * | $s1 | + * +-------------------------+ + * | $s2 | + * +-------------------------+ + * | $s3 | + * +-------------------------+ + * | $s4 | + * +-------------------------+ + * | $s5 | + * +-------------------------+ <--BPF_REG_FP + * | prog->aux->stack_depth | + * | (optional) | + * current $sp -------------> +-------------------------+ + * low + */ +static void build_prologue(struct jit_ctx *ctx) +{ + int stack_adjust = 0, store_offset, bpf_stack_adjust; + + bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); + + /* To store ra, fp, s0, s1, s2, s3, s4 and s5. */ + stack_adjust += sizeof(long) * 8; + + stack_adjust = round_up(stack_adjust, 16); + stack_adjust += bpf_stack_adjust; + + /* + * First instruction initializes the tail call count (TCC). + * On tail call we skip this instruction, and the TCC is + * passed in REG_TCC from the caller. + */ + emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust); + + store_offset = stack_adjust - sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset); + + store_offset -= sizeof(long); + emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset); + + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust); + + if (bpf_stack_adjust) + emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust); + + /* + * If the program contains calls and tail calls, REG_TCC needs + * to be saved across calls.
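 *
 * Editor's worked example: with prog->aux->stack_depth = 24, the frame
 * built above is round_up(8 * sizeof(long), 16) + round_up(24, 16) =
 * 64 + 32 = 96 bytes.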
+ */ + if (seen_tail_call(ctx) && seen_call(ctx)) + move_reg(ctx, TCC_SAVED, REG_TCC); + + ctx->stack_size = stack_adjust; +} + +static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) +{ + int stack_adjust = ctx->stack_size; + int load_offset; + + load_offset = stack_adjust - sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset); + + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust); + + if (!is_tail_call) { + /* Set return value */ + move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]); + /* Return to the caller */ + emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0); + } else { + /* + * Call the next bpf prog and skip the first instruction + * of TCC initialization. + */ + emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1); + } +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + __build_epilogue(ctx, false); +} + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} + +/* initialized on the first pass of build_body() */ +static int out_offset = -1; +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + int off; + u8 tcc = tail_call_reg(ctx); + u8 a1 = LOONGARCH_GPR_A1; + u8 a2 = LOONGARCH_GPR_A2; + u8 t1 = LOONGARCH_GPR_T1; + u8 t2 = LOONGARCH_GPR_T2; + u8 t3 = LOONGARCH_GPR_T3; + const int idx0 = ctx->idx; + +#define cur_offset (ctx->idx - idx0) +#define jmp_offset (out_offset - (cur_offset)) + + /* + * a0: &ctx + * a1: &array + * a2: index + * + * if (index >= array->map.max_entries) + * goto out; + */ + off = offsetof(struct bpf_array, map.max_entries); + emit_insn(ctx, ldwu, t1, a1, off); + /* bgeu $a2, $t1, jmp_offset */ + if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0) + goto toofar; + + /* + * if (--TCC < 0) + * goto out; + */ + emit_insn(ctx, addid, REG_TCC, tcc, -1); + if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + + /* + * prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + emit_insn(ctx, alsld, t2, a2, a1, 2); + off = offsetof(struct bpf_array, ptrs); + emit_insn(ctx, ldd, t2, t2, off); + /* beq $t2, $zero, jmp_offset */ + if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + + /* goto *(prog->bpf_func + 4); */ + off = offsetof(struct bpf_prog, bpf_func); + emit_insn(ctx, ldd, t3, t2, off); + __build_epilogue(ctx, true); + + /* out: */ + if (out_offset == -1) + out_offset = cur_offset; + if (cur_offset != out_offset) { + pr_err_once("tail_call out_offset = %d, expected %d!\n", + cur_offset, out_offset); + return -1; + } + + return 0; + +toofar: + pr_info_once("tail_call: jump too far\n"); + return -1; +#undef cur_offset +#undef jmp_offset +} + +static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 t1 = 
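/*
 * Editor's sketch of the tail-call contract that emit_bpf_tail_call()
 * above encodes (illustrative C, not the emitted code):
 *
 *	if (index >= array->map.max_entries) goto out;
 *	if (--tcc < 0) goto out;
 *	prog = array->ptrs[index];
 *	if (prog == NULL) goto out;
 *	goto *(prog->bpf_func + 4);	// +4 skips the TCC init instruction
 * out:
 */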
LOONGARCH_GPR_T1; + const u8 t2 = LOONGARCH_GPR_T2; + const u8 t3 = LOONGARCH_GPR_T3; + const u8 r0 = regmap[BPF_REG_0]; + const u8 src = regmap[insn->src_reg]; + const u8 dst = regmap[insn->dst_reg]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const bool isdw = BPF_SIZE(insn->code) == BPF_DW; + + move_imm(ctx, t1, off, false); + emit_insn(ctx, addd, t1, dst, t1); + move_reg(ctx, t3, src); + + switch (imm) { + /* lock *(size *)(dst + off) <op>= src */ + case BPF_ADD: + if (isdw) + emit_insn(ctx, amaddd, t2, t1, src); + else + emit_insn(ctx, amaddw, t2, t1, src); + break; + case BPF_AND: + if (isdw) + emit_insn(ctx, amandd, t2, t1, src); + else + emit_insn(ctx, amandw, t2, t1, src); + break; + case BPF_OR: + if (isdw) + emit_insn(ctx, amord, t2, t1, src); + else + emit_insn(ctx, amorw, t2, t1, src); + break; + case BPF_XOR: + if (isdw) + emit_insn(ctx, amxord, t2, t1, src); + else + emit_insn(ctx, amxorw, t2, t1, src); + break; + /* src = atomic_fetch_<op>(dst + off, src) */ + case BPF_ADD | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amaddd, src, t1, t3); + } else { + emit_insn(ctx, amaddw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_AND | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amandd, src, t1, t3); + } else { + emit_insn(ctx, amandw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_OR | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amord, src, t1, t3); + } else { + emit_insn(ctx, amorw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_XOR | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amxord, src, t1, t3); + } else { + emit_insn(ctx, amxorw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + /* src = atomic_xchg(dst + off, src); */ + case BPF_XCHG: + if (isdw) { + emit_insn(ctx, amswapd, src, t1, t3); + } else { + emit_insn(ctx, amswapw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + /* r0 = atomic_cmpxchg(dst + off, r0, src); */ + case BPF_CMPXCHG: + move_reg(ctx, t2, r0); + if (isdw) { + emit_insn(ctx, lld, r0, t1, 0); + emit_insn(ctx, bne, t2, r0, 4); + move_reg(ctx, t3, src); + emit_insn(ctx, scd, t3, t1, 0); + emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4); + } else { + emit_insn(ctx, llw, r0, t1, 0); + emit_zext_32(ctx, t2, true); + emit_zext_32(ctx, r0, true); + emit_insn(ctx, bne, t2, r0, 4); + move_reg(ctx, t3, src); + emit_insn(ctx, scw, t3, t1, 0); + emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6); + emit_zext_32(ctx, r0, true); + } + break; + } +} + +static bool is_signed_bpf_cond(u8 cond) +{ + return cond == BPF_JSGT || cond == BPF_JSLT || + cond == BPF_JSGE || cond == BPF_JSLE; +} + +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass) +{ + u8 tm = -1; + u64 func_addr; + bool func_addr_fixed; + int i = insn - ctx->prog->insnsi; + int ret, jmp_offset; + const u8 code = insn->code; + const u8 cond = BPF_OP(code); + const u8 t1 = LOONGARCH_GPR_T1; + const u8 t2 = LOONGARCH_GPR_T2; + const u8 src = regmap[insn->src_reg]; + const u8 dst = regmap[insn->dst_reg]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32; + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + move_reg(ctx, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + move_imm(ctx, dst, imm, is32); + break; + + /* dst = dst + src */ + case 
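/*
 * Editor's note: the ll/sc loop emitted for BPF_CMPXCHG above follows
 * the usual compare-and-swap contract; an illustrative C equivalent
 * using a GCC builtin (not the JITed code):
 *
 *	long cmpxchg_sem(long *p, long r0, long src)
 *	{
 *		__atomic_compare_exchange_n(p, &r0, src, 0,
 *					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *		return r0;	// the old value ends up in BPF r0
 *	}
 */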
BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit_insn(ctx, addd, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst + imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + if (is_signed_imm12(imm)) { + emit_insn(ctx, addid, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, addd, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst - src */ + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit_insn(ctx, subd, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst - imm */ + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (is_signed_imm12(-imm)) { + emit_insn(ctx, addid, dst, dst, -imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, subd, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst * src */ + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit_insn(ctx, muld, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst * imm */ + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, muld, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst / src */ + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_zext_32(ctx, dst, is32); + move_reg(ctx, t1, src); + emit_zext_32(ctx, t1, is32); + emit_insn(ctx, divdu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst / imm */ + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_zext_32(ctx, dst, is32); + emit_insn(ctx, divdu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst % src */ + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_zext_32(ctx, dst, is32); + move_reg(ctx, t1, src); + emit_zext_32(ctx, t1, is32); + emit_insn(ctx, moddu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst % imm */ + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_zext_32(ctx, dst, is32); + emit_insn(ctx, moddu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst & src */ + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit_insn(ctx, and, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst & imm */ + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, andi, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, and, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst | src */ + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit_insn(ctx, or, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst | imm */ + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, ori, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, or, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst ^ src */ + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit_insn(ctx, xor, dst, dst, 
src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst ^ imm */ + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, xori, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, xor, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst << src (logical) */ + case BPF_ALU | BPF_LSH | BPF_X: + emit_insn(ctx, sllw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_insn(ctx, slld, dst, dst, src); + break; + + /* dst = dst << imm (logical) */ + case BPF_ALU | BPF_LSH | BPF_K: + emit_insn(ctx, slliw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_LSH | BPF_K: + emit_insn(ctx, sllid, dst, dst, imm); + break; + + /* dst = dst >> src (logical) */ + case BPF_ALU | BPF_RSH | BPF_X: + emit_insn(ctx, srlw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_RSH | BPF_X: + emit_insn(ctx, srld, dst, dst, src); + break; + + /* dst = dst >> imm (logical) */ + case BPF_ALU | BPF_RSH | BPF_K: + emit_insn(ctx, srliw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_RSH | BPF_K: + emit_insn(ctx, srlid, dst, dst, imm); + break; + + /* dst = dst >> src (arithmetic) */ + case BPF_ALU | BPF_ARSH | BPF_X: + emit_insn(ctx, sraw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_insn(ctx, srad, dst, dst, src); + break; + + /* dst = dst >> imm (arithmetic) */ + case BPF_ALU | BPF_ARSH | BPF_K: + emit_insn(ctx, sraiw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit_insn(ctx, sraid, dst, dst, imm); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + emit_insn(ctx, bstrpickd, dst, dst, 15, 0); + break; + case 32: + /* zero-extend 32 bits into 64 bits */ + emit_zext_32(ctx, dst, is32); + break; + case 64: + /* do nothing */ + break; + } + break; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + emit_insn(ctx, revb2h, dst, dst); + /* zero-extend 16 bits into 64 bits */ + emit_insn(ctx, bstrpickd, dst, dst, 15, 0); + break; + case 32: + emit_insn(ctx, revb2w, dst, dst); + /* zero-extend 32 bits into 64 bits */ + emit_zext_32(ctx, dst, is32); + break; + case 64: + emit_insn(ctx, revbd, dst, dst); + break; + } + break; + + /* PC += off if dst cond src */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + jmp_offset = bpf2la_offset(i, off, ctx); + move_reg(ctx, t1, dst); + move_reg(ctx, t2, src); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sext_32(ctx, t1, is32); + emit_sext_32(ctx, t2, is32); + } else { + emit_zext_32(ctx, t1, is32); + emit_zext_32(ctx, t2, is32); + } + if 
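/*
 * Editor's note: BPF_JMP32 compares use only the low 32 bits, which is
 * why both operands are widened above before branching; a signed 32-bit
 * compare behaves like
 *
 *	(int64_t)(int32_t)t1 < (int64_t)(int32_t)t2
 *
 * while the unsigned forms zero-extend instead.
 */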
(emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off if dst cond imm */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + jmp_offset = bpf2la_offset(i, off, ctx); + if (imm) { + move_imm(ctx, t1, imm, false); + tm = t1; + } else { + /* If imm is 0, simply use zero register. */ + tm = LOONGARCH_GPR_ZERO; + } + move_reg(ctx, t2, dst); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sext_32(ctx, tm, is32); + emit_sext_32(ctx, t2, is32); + } else { + emit_zext_32(ctx, tm, is32); + emit_zext_32(ctx, t2, is32); + } + if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off if dst & src */ + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + jmp_offset = bpf2la_offset(i, off, ctx); + emit_insn(ctx, and, t1, dst, src); + emit_zext_32(ctx, t1, is32); + if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off if dst & imm */ + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + jmp_offset = bpf2la_offset(i, off, ctx); + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, and, t1, dst, t1); + emit_zext_32(ctx, t1, is32); + if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off */ + case BPF_JMP | BPF_JA: + jmp_offset = bpf2la_offset(i, off, ctx); + if (emit_uncond_jmp(ctx, jmp_offset) < 0) + goto toofar; + break; + + /* function call */ + case BPF_JMP | BPF_CALL: + mark_call(ctx); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + + move_addr(ctx, t1, func_addr); + emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0); + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break; + + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + mark_tail_call(ctx); + if (emit_bpf_tail_call(ctx) < 0) + return -EINVAL; + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + + jmp_offset = epilogue_offset(ctx); + if (emit_uncond_jmp(ctx, jmp_offset) < 0) + goto toofar; + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; + + move_imm(ctx, dst, imm64, is32); + return 1; + } + + /* dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldbu, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxbu, dst, src, t1); + } + break; + case BPF_H: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldhu, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxhu, dst, src, t1); + } + break; + case BPF_W: + if 
(is_signed_imm12(off)) { + emit_insn(ctx, ldwu, dst, src, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, ldptrw, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxwu, dst, src, t1); + } + break; + case BPF_DW: + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxd, dst, src, t1); + break; + } + break; + + /* *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, stb, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxb, t1, dst, t2); + } + break; + case BPF_H: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, sth, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxh, t1, dst, t2); + } + break; + case BPF_W: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, stw, t1, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrw, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxw, t1, dst, t2); + } + break; + case BPF_DW: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, std, t1, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrd, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxd, t1, dst, t2); + } + break; + } + break; + + /* *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + if (is_signed_imm12(off)) { + emit_insn(ctx, stb, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxb, src, dst, t1); + } + break; + case BPF_H: + if (is_signed_imm12(off)) { + emit_insn(ctx, sth, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxh, src, dst, t1); + } + break; + case BPF_W: + if (is_signed_imm12(off)) { + emit_insn(ctx, stw, src, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrw, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxw, src, dst, t1); + } + break; + case BPF_DW: + if (is_signed_imm12(off)) { + emit_insn(ctx, std, src, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrd, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxd, src, dst, t1); + } + break; + } + break; + + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + emit_atomic(insn, ctx); + break; + + /* Speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + + default: + pr_err("bpf_jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; + +toofar: + pr_info_once("bpf_jit: opcode %02x, jump too far\n", code); + return -E2BIG; +} + +static int build_body(struct jit_ctx *ctx, bool extra_pass) +{ + int i; + const struct bpf_prog *prog = ctx->prog; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + + ret = build_insn(insn, ctx, extra_pass); + if (ret > 0) { + i++; + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + continue; + } + if (ret) + return ret; + } + + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + + return 0; +} + +/* Fill space with break instructions */ +static void 
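/*
 * Editor's illustration: BPF_LD | BPF_IMM | BPF_DW (handled above)
 * spans two BPF instruction slots, and build_insn() returns 1 so that
 * build_body() skips the second slot; the 64-bit constant is
 * reassembled as
 *
 *	u64 imm64 = ((u64)insn[1].imm << 32) | (u32)insn[0].imm;
 */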
jit_fill_hole(void *area, unsigned int size) +{ + u32 *ptr; + + /* We are guaranteed to have aligned memory */ + for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) + *ptr++ = INSN_BREAK; +} + +static int validate_code(struct jit_ctx *ctx) +{ + int i; + union loongarch_instruction insn; + + for (i = 0; i < ctx->idx; i++) { + insn = ctx->image[i]; + /* Check INSN_BREAK */ + if (insn.word == INSN_BREAK) + return -1; + } + + return 0; +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + bool tmp_blinded = false, extra_pass = false; + u8 *image_ptr; + int image_size; + struct jit_ctx ctx; + struct jit_data *jit_data; + struct bpf_binary_header *header; + struct bpf_prog *tmp, *orig_prog = prog; + + /* + * If BPF JIT was not enabled then we must fall back to + * the interpreter. + */ + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* + * If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. Otherwise, we save + * the new JITed code. + */ + if (IS_ERR(tmp)) + return orig_prog; + + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + if (jit_data->ctx.offset) { + ctx = jit_data->ctx; + image_ptr = jit_data->image; + header = jit_data->header; + extra_pass = true; + image_size = sizeof(u32) * ctx.idx; + goto skip_init_ctx; + } + + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL); + if (ctx.offset == NULL) { + prog = orig_prog; + goto out_offset; + } + + /* 1. Initial fake pass to compute ctx->idx and set ctx->flags */ + build_prologue(&ctx); + if (build_body(&ctx, extra_pass)) { + prog = orig_prog; + goto out_offset; + } + ctx.epilogue_offset = ctx.idx; + build_epilogue(&ctx); + + /* Now we know the actual image size. + * As each LoongArch instruction is 32 bits wide, + * we translate the number of JITed instructions into + * the size required to store the JITed code. + */ + image_size = sizeof(u32) * ctx.idx; + /* Now we know the size of the structure to make */ + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_offset; + } + + /* 2. Now, the actual pass to generate final JIT code */ + ctx.image = (union loongarch_instruction *)image_ptr; + +skip_init_ctx: + ctx.idx = 0; + + build_prologue(&ctx); + if (build_body(&ctx, extra_pass)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_offset; + } + build_epilogue(&ctx); +
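/*
 * Editor's sketch of the two-pass protocol used here (illustrative):
 * pass 1 runs with ctx.image == NULL so each emit only advances
 * ctx.idx; pass 2 resets ctx.idx and writes for real, exactly as the
 * emit_insn() macro in bpf_jit.h behaves:
 *
 *	static void emit_or_count(struct jit_ctx *ctx, u32 word)
 *	{
 *		if (ctx->image)
 *			ctx->image[ctx->idx].word = word;
 *		ctx->idx++;
 *	}
 */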
+ /* 3. Extra pass to validate JITed code */ + if (validate_code(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_offset; + } + + /* And we're done */ + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx.image); + + /* Update the icache */ + flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx)); + + if (!prog->is_func || extra_pass) { + if (extra_pass && ctx.idx != jit_data->ctx.idx) { + pr_err_once("multi-func JIT bug %d != %d\n", + ctx.idx, jit_data->ctx.idx); + bpf_jit_binary_free(header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; + } + bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; + jit_data->header = header; + } + prog->jited = 1; + prog->jited_len = image_size; + prog->bpf_func = (void *)ctx.image; + + if (!prog->is_func || extra_pass) { + int i; + + /* offset[prog->len] is the size of the program */ + for (i = 0; i <= prog->len; i++) + ctx.offset[i] *= LOONGARCH_INSN_SIZE; + bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); + +out_offset: + kvfree(ctx.offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } + +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); + + out_offset = -1; + + return prog; +} diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h new file mode 100644 index 000000000..684b22f54 --- /dev/null +++ b/arch/loongarch/net/bpf_jit.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * BPF JIT compiler for LoongArch + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/bpf.h> +#include <linux/filter.h> +#include <asm/cacheflush.h> +#include <asm/inst.h> + +struct jit_ctx { + const struct bpf_prog *prog; + unsigned int idx; + unsigned int flags; + unsigned int epilogue_offset; + u32 *offset; + union loongarch_instruction *image; + u32 stack_size; +}; + +struct jit_data { + struct bpf_binary_header *header; + u8 *image; + struct jit_ctx ctx; +}; + +#define emit_insn(ctx, func, ...) \ +do { \ + if (ctx->image != NULL) { \ + union loongarch_instruction *insn = &ctx->image[ctx->idx]; \ + emit_##func(insn, ##__VA_ARGS__); \ + } \ + ctx->idx++; \ +} while (0) + +#define is_signed_imm12(val) signed_imm_check(val, 12) +#define is_signed_imm14(val) signed_imm_check(val, 14) +#define is_signed_imm16(val) signed_imm_check(val, 16) +#define is_signed_imm26(val) signed_imm_check(val, 26) +#define is_signed_imm32(val) signed_imm_check(val, 32) +#define is_signed_imm52(val) signed_imm_check(val, 52) +#define is_unsigned_imm12(val) unsigned_imm_check(val, 12) + +static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx) +{ + /* BPF JMP offset is relative to the next instruction */ + bpf_insn++; + /* + * LoongArch branch instructions, however, encode the offset + * from the branch itself, so we must subtract 1 from the + * instruction offset. + */
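/*
 * Editor's worked example: if BPF insn i+1 starts at native index 10
 * and the jump target starts at native index 25, the branch itself sits
 * at native index 9 (= 10 - 1), so the encoded offset is 25 - 9 = 16
 * instructions.
 */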
+ return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1)); +} + +static inline int epilogue_offset(const struct jit_ctx *ctx) +{ + int from = ctx->idx; + int to = ctx->epilogue_offset; + + return (to - from); +} + +/* Zero-extend 32 bits into 64 bits */ +static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32) +{ + if (!is32) + return; + + emit_insn(ctx, lu32id, reg, 0); +} + +/* Sign-extend 32 bits into 64 bits */ +static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32) +{ + if (!is32) + return; + + emit_insn(ctx, addiw, reg, reg, 0); +} + +static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr) +{ + u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52; + + /* lu12iw rd, imm_31_12 */ + imm_31_12 = (addr >> 12) & 0xfffff; + emit_insn(ctx, lu12iw, rd, imm_31_12); + + /* ori rd, rd, imm_11_0 */ + imm_11_0 = addr & 0xfff; + emit_insn(ctx, ori, rd, rd, imm_11_0); + + /* lu32id rd, imm_51_32 */ + imm_51_32 = (addr >> 32) & 0xfffff; + emit_insn(ctx, lu32id, rd, imm_51_32); + + /* lu52id rd, rd, imm_63_52 */ + imm_63_52 = (addr >> 52) & 0xfff; + emit_insn(ctx, lu52id, rd, rd, imm_63_52); +} + +static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32) +{ + long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31; + + /* or rd, $zero, $zero */ + if (imm == 0) { + emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO); + return; + } + + /* addiw rd, $zero, imm_11_0 */ + if (is_signed_imm12(imm)) { + emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm); + goto zext; + } + + /* ori rd, $zero, imm_11_0 */ + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm); + goto zext; + } + + /* lu52id rd, $zero, imm_63_52 */ + imm_63_52 = (imm >> 52) & 0xfff; + imm_51_0 = imm & 0xfffffffffffff; + if (imm_63_52 != 0 && imm_51_0 == 0) { + emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52); + return; + } + + /* lu12iw rd, imm_31_12 */ + imm_31_12 = (imm >> 12) & 0xfffff; + emit_insn(ctx, lu12iw, rd, imm_31_12); + + /* ori rd, rd, imm_11_0 */ + imm_11_0 = imm & 0xfff; + if (imm_11_0 != 0) + emit_insn(ctx, ori, rd, rd, imm_11_0); + + if (!is_signed_imm32(imm)) { + if (imm_51_0 != 0) { + /* + * If bits[51:31] are all 0 or all 1, then bits[51:32] + * were already sign-extended by lu12iw, and there is + * no need to call lu32id to do a new fill operation. + */
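/*
 * Editor's worked example of the lu12i.w/ori/lu32i.d/lu52i.d split that
 * move_addr() above performs unconditionally (and that move_imm()
 * builds incrementally), for addr = 0x123456789abcdef0:
 *
 *	lu12i.w	rd, 0x9abcd	// bits 31..12
 *	ori	rd, rd, 0xef0	// bits 11..0
 *	lu32i.d	rd, 0x45678	// bits 51..32
 *	lu52i.d	rd, rd, 0x123	// bits 63..52
 */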
+ imm_51_31 = (imm >> 31) & 0x1fffff; + if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) { + /* lu32id rd, imm_51_32 */ + imm_51_32 = (imm >> 32) & 0xfffff; + emit_insn(ctx, lu32id, rd, imm_51_32); + } + } + + /* lu52id rd, rd, imm_63_52 */ + if (!is_signed_imm52(imm)) + emit_insn(ctx, lu52id, rd, rd, imm_63_52); + } + +zext: + emit_zext_32(ctx, rd, is32); +} + +static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd, + enum loongarch_gpr rj) +{ + emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO); +} + +static inline int invert_jmp_cond(u8 cond) +{ + switch (cond) { + case BPF_JEQ: + return BPF_JNE; + case BPF_JNE: + case BPF_JSET: + return BPF_JEQ; + case BPF_JGT: + return BPF_JLE; + case BPF_JGE: + return BPF_JLT; + case BPF_JLT: + return BPF_JGE; + case BPF_JLE: + return BPF_JGT; + case BPF_JSGT: + return BPF_JSLE; + case BPF_JSGE: + return BPF_JSLT; + case BPF_JSLT: + return BPF_JSGE; + case BPF_JSLE: + return BPF_JSGT; + } + return -1; +} + +static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + switch (cond) { + case BPF_JEQ: + /* PC += jmp_offset if rj == rd */ + emit_insn(ctx, beq, rj, rd, jmp_offset); + return; + case BPF_JNE: + case BPF_JSET: + /* PC += jmp_offset if rj != rd */ + emit_insn(ctx, bne, rj, rd, jmp_offset); + return; + case BPF_JGT: + /* PC += jmp_offset if rj > rd (unsigned) */ + emit_insn(ctx, bltu, rd, rj, jmp_offset); + return; + case BPF_JLT: + /* PC += jmp_offset if rj < rd (unsigned) */ + emit_insn(ctx, bltu, rj, rd, jmp_offset); + return; + case BPF_JGE: + /* PC += jmp_offset if rj >= rd (unsigned) */ + emit_insn(ctx, bgeu, rj, rd, jmp_offset); + return; + case BPF_JLE: + /* PC += jmp_offset if rj <= rd (unsigned) */ + emit_insn(ctx, bgeu, rd, rj, jmp_offset); + return; + case BPF_JSGT: + /* PC += jmp_offset if rj > rd (signed) */ + emit_insn(ctx, blt, rd, rj, jmp_offset); + return; + case BPF_JSLT: + /* PC += jmp_offset if rj < rd (signed) */ + emit_insn(ctx, blt, rj, rd, jmp_offset); + return; + case BPF_JSGE: + /* PC += jmp_offset if rj >= rd (signed) */ + emit_insn(ctx, bge, rj, rd, jmp_offset); + return; + case BPF_JSLE: + /* PC += jmp_offset if rj <= rd (signed) */ + emit_insn(ctx, bge, rd, rj, jmp_offset); + return; + } +} + +static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + cond = invert_jmp_cond(cond); + cond_jmp_offset(ctx, cond, rj, rd, 2); + emit_insn(ctx, b, jmp_offset); +} + +static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset) +{ + emit_insn(ctx, b, jmp_offset); +} + +static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + /* + * A large PC-relative jump offset may overflow the immediate field of + * the native conditional branch instruction, which would force a + * conversion to an absolute jump, and that jump sequence is + * particularly nasty. For now, use cond_jmp_offs26() directly to keep + * it simple. In the future we could add support for far branching, but + * branch relaxation needs more than two passes to converge and the + * resulting code is hard to follow; it is not clear that it would be + * worth the extra pain. So leave it as it is for code readability.
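 *
 * Editor's illustration of the inverted-branch shape cond_jmp_offs26()
 * emits, e.g. for BPF_JEQ:
 *	bne	rj, rd, 2	# fall through when the condition fails
 *	b	jmp_offset	# 26-bit reach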
+ */ + if (is_signed_imm26(jmp_offset)) { + cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset); + return 0; + } + + return -EINVAL; +} + +static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset) +{ + if (is_signed_imm26(jmp_offset)) { + uncond_jmp_offs26(ctx, jmp_offset); + return 0; + } + + return -EINVAL; +} + +static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + if (is_signed_imm16(jmp_offset)) { + cond_jmp_offset(ctx, cond, rj, rd, jmp_offset); + return 0; + } + + return -EINVAL; +} diff --git a/arch/loongarch/pci/Makefile b/arch/loongarch/pci/Makefile new file mode 100644 index 000000000..8101ef3df --- /dev/null +++ b/arch/loongarch/pci/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the PCI specific kernel interface routines under Linux. +# + +obj-y += pci.o +obj-$(CONFIG_ACPI) += acpi.o diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c new file mode 100644 index 000000000..8235ec92b --- /dev/null +++ b/arch/loongarch/pci/acpi.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/pci.h> +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/pci-acpi.h> +#include <linux/pci-ecam.h> + +#include <asm/pci.h> +#include <asm/numa.h> +#include <asm/loongson.h> + +struct pci_root_info { + struct acpi_pci_root_info common; + struct pci_config_window *cfg; +}; + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + struct pci_config_window *cfg = bridge->bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct device *bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + + return 0; +} + +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ + struct pci_config_window *cfg = bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_pci_root *root = acpi_driver_data(adev); + + return root->segment; +} + +static void acpi_release_root_info(struct acpi_pci_root_info *ci) +{ + struct pci_root_info *info; + + info = container_of(ci, struct pci_root_info, common); + pci_ecam_free(info->cfg); + kfree(ci->ops); + kfree(info); +} + +static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) +{ + int status; + struct resource_entry *entry, *tmp; + struct acpi_device *device = ci->bridge; + + status = acpi_pci_probe_root_resources(ci); + if (status > 0) { + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + if (entry->res->flags & IORESOURCE_MEM) { + entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); + entry->res->start |= entry->offset; + entry->res->end |= entry->offset; + } + } + return status; + } + + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + dev_dbg(&device->dev, + "host bridge window %pR (ignored)\n", entry->res); + resource_list_destroy_entry(entry); + } + + return 0; +} + +/* + * Create a PCI config space window + * - reserve mem region + * - alloc struct pci_config_window with space for all mappings + * - ioremap the config space + */ +static struct pci_config_window *arch_pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, const struct pci_ecam_ops *ops) +{ + int bsz, bus_range, err; + struct 
resource *conflict; + struct pci_config_window *cfg; + + if (busr->start > busr->end) + return ERR_PTR(-EINVAL); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return ERR_PTR(-ENOMEM); + + cfg->parent = dev; + cfg->ops = ops; + cfg->busr.start = busr->start; + cfg->busr.end = busr->end; + cfg->busr.flags = IORESOURCE_BUS; + bus_range = resource_size(cfgres) >> ops->bus_shift; + + bsz = 1 << ops->bus_shift; + + cfg->res.start = cfgres->start; + cfg->res.end = cfgres->end; + cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + cfg->res.name = "PCI ECAM"; + + conflict = request_resource_conflict(&iomem_resource, &cfg->res); + if (conflict) { + err = -EBUSY; + dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n", + &cfg->res, conflict->name, conflict); + goto err_exit; + } + + cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); + if (!cfg->win) + goto err_exit_iomap; + + if (ops->init) { + err = ops->init(cfg); + if (err) + goto err_exit; + } + dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr); + + return cfg; + +err_exit_iomap: + err = -ENOMEM; + dev_err(dev, "ECAM ioremap failed\n"); +err_exit: + pci_ecam_free(cfg); + return ERR_PTR(err); +} + +/* + * Lookup the bus range for the domain in MCFG, and set up config space + * mapping. + */ +static struct pci_config_window * +pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +{ + int ret, bus_shift; + u16 seg = root->segment; + struct device *dev = &root->device->dev; + struct resource cfgres; + struct resource *bus_res = &root->secondary; + struct pci_config_window *cfg; + const struct pci_ecam_ops *ecam_ops; + + ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); + if (ret < 0) { + dev_err(dev, "%04x:%pR ECAM region not found, use default value\n", seg, bus_res); + ecam_ops = &loongson_pci_ecam_ops; + root->mcfg_addr = mcfg_addr_init(0); + } + + bus_shift = ecam_ops->bus_shift ? 
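/*
 * Editor's note: with the generic 20-bit bus shift, ECAM places the
 * config space of bus b, device d, function f, register r at
 *
 *	base + ((b << 20) | (d << 15) | (f << 12) | r)
 *
 * which is why the window mapped above spans bus_range << bus_shift
 * bytes.
 */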
: 20; + + if (bus_shift == 20) + cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + else { + cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift); + cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1; + cfgres.end |= BIT(28) + (((PCI_CFG_SPACE_EXP_SIZE - 1) & 0xf00) << 16); + cfgres.flags = IORESOURCE_MEM; + cfg = arch_pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + } + + if (IS_ERR(cfg)) { + dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg)); + return NULL; + } + + return cfg; +} + +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct pci_bus *bus; + struct pci_root_info *info; + struct acpi_pci_root_ops *root_ops; + int domain = root->segment; + int busnum = root->secondary.start; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + pr_warn("pci_bus %04x:%02x: ignored (out of memory)\n", domain, busnum); + return NULL; + } + + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); + if (!root_ops) { + kfree(info); + return NULL; + } + + info->cfg = pci_acpi_setup_ecam_mapping(root); + if (!info->cfg) { + kfree(info); + kfree(root_ops); + return NULL; + } + + root_ops->release_info = acpi_release_root_info; + root_ops->prepare_resources = acpi_prepare_root_resources; + root_ops->pci_ops = (struct pci_ops *)&info->cfg->ops->pci_ops; + + bus = pci_find_bus(domain, busnum); + if (bus) { + memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window)); + kfree(info); + } else { + struct pci_bus *child; + + bus = acpi_pci_root_create(root, root_ops, + &info->common, info->cfg); + if (!bus) { + kfree(info); + kfree(root_ops); + return NULL; + } + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + return bus; +} diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c new file mode 100644 index 000000000..272663915 --- /dev/null +++ b/arch/loongarch/pci/pci.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/vgaarb.h> +#include <asm/cacheflush.h> +#include <asm/loongson.h> + +#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 +#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06 +#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36 + +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val); + return -EINVAL; +} + +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val); + return -EINVAL; +} + +phys_addr_t mcfg_addr_init(int node) +{ + return (((u64)node << 44) | MCFG_EXT_PCICFG_BASE); +} + +static int __init pcibios_init(void) +{ + unsigned int lsize; + + /* + * Set PCI cacheline size to that of the last level in the + * cache hierarchy. 
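 *
 * Editor's note: pci_dfl_cache_line_size is measured in 32-bit dwords,
 * hence the "lsize >> 2" below; a 64-byte last-level line yields
 * 64 / 4 = 16.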
+ */ + lsize = cpu_last_level_cache_line_size(); + + BUG_ON(!lsize); + + pci_dfl_cache_line_size = lsize >> 2; + + pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); + + return 0; +} + +subsys_initcall(pcibios_init); + +int pcibios_device_add(struct pci_dev *dev) +{ + int id; + struct irq_domain *dom; + + id = pci_domain_nr(dev->bus); + dom = irq_find_matching_fwnode(get_pch_msi_handle(id), DOMAIN_BUS_PCI_MSI); + dev_set_msi_domain(&dev->dev, dom); + + return 0; +} + +int pcibios_alloc_irq(struct pci_dev *dev) +{ + if (acpi_disabled) + return 0; + if (pci_dev_msi_enabled(dev)) + return 0; + return acpi_pci_irq_enable(dev); +} + +static void pci_fixup_vgadev(struct pci_dev *pdev) +{ + struct pci_dev *devp = NULL; + + while ((devp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, devp))) { + if (devp->vendor != PCI_VENDOR_ID_LOONGSON) { + vga_set_default_device(devp); + dev_info(&pdev->dev, + "Overriding boot device as %X:%X\n", + devp->vendor, devp->device); + } + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev); diff --git a/arch/loongarch/vdso/.gitignore b/arch/loongarch/vdso/.gitignore new file mode 100644 index 000000000..652e31d82 --- /dev/null +++ b/arch/loongarch/vdso/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vdso.lds diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile new file mode 100644 index 000000000..d89e2ac75 --- /dev/null +++ b/arch/loongarch/vdso/Makefile @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: GPL-2.0 +# Objects to go into the VDSO. + +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_LARCH_32|R_LARCH_64|R_LARCH_MARK_LA|R_LARCH_JUMP_SLOT +include $(srctree)/lib/vdso/Makefile + +obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o + +# Common compiler flags between ABIs. +ccflags-vdso := \ + $(filter -I%,$(KBUILD_CFLAGS)) \ + $(filter -E%,$(KBUILD_CFLAGS)) \ + $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ + -D__VDSO__ + +ifeq ($(cc-name),clang) +ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS)) +endif + +cflags-vdso := $(ccflags-vdso) \ + -isystem $(shell $(CC) -print-file-name=include) \ + $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ + -O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \ + -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ + $(call cc-option, -fno-asynchronous-unwind-tables) \ + $(call cc-option, -fno-stack-protector) +aflags-vdso := $(ccflags-vdso) \ + -D__ASSEMBLY__ -Wa,-gdwarf-2 + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) +endif + +# VDSO linker flags. +ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ + $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ + --hash-style=sysv --build-id -T + +GCOV_PROFILE := n + +# +# Shared build commands. +# + +quiet_cmd_vdsold_and_vdso_check = LD $@ + cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check) + +quiet_cmd_vdsoas_o_S = AS $@ + cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $< + +# Generate VDSO offsets using helper script +gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE + $(call if_changed,vdsosym) + +# +# Build native VDSO. 
+# + +native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS)) + +targets += $(obj-vdso-y) +targets += vdso.lds vdso.so.dbg vdso.so + +obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o) + +$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi) +$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi) + +$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi) + +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold_and_vdso_check) + +$(obj)/vdso.so: OBJCOPYFLAGS := -S +$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE + $(call if_changed,objcopy) + +obj-y += vdso.o + +$(obj)/vdso.o : $(obj)/vdso.so + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/loongarch/vdso/elf.S b/arch/loongarch/vdso/elf.S new file mode 100644 index 000000000..9bb21b9f9 --- /dev/null +++ b/arch/loongarch/vdso/elf.S @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/vdso/vdso.h> + +#include <linux/elfnote.h> +#include <linux/version.h> + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END diff --git a/arch/loongarch/vdso/gen_vdso_offsets.sh b/arch/loongarch/vdso/gen_vdso_offsets.sh new file mode 100755 index 000000000..1bb4e1264 --- /dev/null +++ b/arch/loongarch/vdso/gen_vdso_offsets.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Derived from RISC-V and ARM64: +# Author: Will Deacon <will.deacon@arm.com> +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. +# + +LC_ALL=C sed -n -e 's/^00*/0/' -e \ +'s/^\([0-9a-fA-F]*\) . 
VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p' diff --git a/arch/loongarch/vdso/sigreturn.S b/arch/loongarch/vdso/sigreturn.S new file mode 100644 index 000000000..9cb3c58fa --- /dev/null +++ b/arch/loongarch/vdso/sigreturn.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/vdso/vdso.h> + +#include <linux/linkage.h> +#include <uapi/asm/unistd.h> + +#include <asm/regdef.h> +#include <asm/asm.h> + + .section .text + .cfi_sections .debug_frame + +SYM_FUNC_START(__vdso_rt_sigreturn) + + li.w a7, __NR_rt_sigreturn + syscall 0 + +SYM_FUNC_END(__vdso_rt_sigreturn) diff --git a/arch/loongarch/vdso/vdso.S b/arch/loongarch/vdso/vdso.S new file mode 100644 index 000000000..46789bade --- /dev/null +++ b/arch/loongarch/vdso/vdso.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from RISC-V: + * Copyright (C) 2014 Regents of the University of California + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/loongarch/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S new file mode 100644 index 000000000..56ad85589 --- /dev/null +++ b/arch/loongarch/vdso/vdso.lds.S @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch") + +OUTPUT_ARCH(loongarch) + +SECTIONS +{ + PROVIDE(_start = .); + . = SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + .text : { *(.text*) } :text + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.gnu.attributes) + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +VERSION +{ + LINUX_5.10 { + global: + __vdso_getcpu; + __vdso_clock_getres; + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_rt_sigreturn; + local: *; + }; +} + +/* + * Make the sigreturn code visible to the kernel. 
+ */ +VDSO_sigreturn = __vdso_rt_sigreturn; diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c new file mode 100644 index 000000000..e02e775f5 --- /dev/null +++ b/arch/loongarch/vdso/vgetcpu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Fast user context implementation of getcpu() + */ + +#include <asm/vdso.h> +#include <linux/getcpu.h> + +static __always_inline int read_cpu_id(void) +{ + int cpu_id; + + __asm__ __volatile__( + " rdtime.d $zero, %0\n" + : "=r" (cpu_id) + : + : "memory"); + + return cpu_id; +} + +static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void) +{ + return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE); +} + +extern +int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused); +int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused) +{ + int cpu_id; + const struct vdso_pcpu_data *data; + + cpu_id = read_cpu_id(); + + if (cpu) + *cpu = cpu_id; + + if (node) { + data = get_pcpu_data(); + *node = data[cpu_id].node; + } + + return 0; +} diff --git a/arch/loongarch/vdso/vgettimeofday.c b/arch/loongarch/vdso/vgettimeofday.c new file mode 100644 index 000000000..8f22863bd --- /dev/null +++ b/arch/loongarch/vdso/vgettimeofday.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LoongArch userspace implementations of gettimeofday() and similar. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/types.h> + +extern +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +extern +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +extern +int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); +int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) +{ + return __cvdso_clock_getres(clock_id, res); +} |
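Editor's note: __vdso_getcpu() above relies on rdtime.d writing the counter ID, which the kernel programs with the CPU number, into its second operand while the time value itself is discarded in $zero. A hypothetical stand-alone demo of the same trick (LoongArch only, illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int cpu;

	/* time value -> $zero (discarded); counter ID -> cpu */
	__asm__ __volatile__("rdtime.d $zero, %0" : "=r"(cpu));
	printf("running on cpu %u\n", cpu);
	return 0;
}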