Diffstat (limited to 'arch/loongarch/include')
134 files changed, 12151 insertions, 0 deletions
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild new file mode 100644 index 0000000000..93783fa24f --- /dev/null +++ b/arch/loongarch/include/asm/Kbuild @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += dma-contiguous.h +generic-y += mcs_spinlock.h +generic-y += parport.h +generic-y += early_ioremap.h +generic-y += qrwlock.h +generic-y += rwsem.h +generic-y += segment.h +generic-y += user.h +generic-y += stat.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += mman.h +generic-y += msgbuf.h +generic-y += sembuf.h +generic-y += shmbuf.h +generic-y += statfs.h +generic-y += socket.h +generic-y += sockios.h +generic-y += termbits.h +generic-y += poll.h +generic-y += param.h +generic-y += posix_types.h +generic-y += resource.h +generic-y += kvm_para.h diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h new file mode 100644 index 0000000000..52f298f729 --- /dev/null +++ b/arch/loongarch/include/asm/acenv.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * LoongArch specific ACPICA environments and implementation + * + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_ACENV_H +#define _ASM_LOONGARCH_ACENV_H + +/* + * This header is required by ACPI core, but we have nothing to fill in + * right now. Will be updated later when needed. + */ + +#endif /* _ASM_LOONGARCH_ACENV_H */ diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h new file mode 100644 index 0000000000..8de6c4b83a --- /dev/null +++ b/arch/loongarch/include/asm/acpi.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_ACPI_H +#define _ASM_LOONGARCH_ACPI_H + +#include <asm/suspend.h> + +#ifdef CONFIG_ACPI +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; +extern int acpi_noirq; +extern int pptt_enabled; + +#define acpi_os_ioremap acpi_os_ioremap +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +static inline bool acpi_has_cpu_in_madt(void) +{ + return true; +} + +extern struct list_head acpi_wakeup_device_list; +extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS]; + +extern int __init parse_acpi_topology(void); + +static inline u32 get_acpi_id_for_cpu(unsigned int cpu) +{ + return acpi_core_pic[cpu_logical_map(cpu)].processor_id; +} + +#endif /* !CONFIG_ACPI */ + +#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT + +extern int loongarch_acpi_suspend(void); +extern int (*acpi_suspend_lowlevel)(void); + +static inline unsigned long acpi_get_wakeup_address(void) +{ +#ifdef CONFIG_SUSPEND + return (unsigned long)loongarch_wakeup_start; +#endif + return 0UL; +} + +#endif /* _ASM_LOONGARCH_ACPI_H */ diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h new file mode 100644 index 0000000000..b24437e28c --- /dev/null +++ b/arch/loongarch/include/asm/addrspace.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1996, 
99 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999 by Silicon Graphics, Inc. + */ +#ifndef _ASM_ADDRSPACE_H +#define _ASM_ADDRSPACE_H + +#include <linux/const.h> + +#include <asm/loongarch.h> + +/* + * This gives the physical RAM offset. + */ +#ifndef __ASSEMBLY__ +#ifndef PHYS_OFFSET +#define PHYS_OFFSET _UL(0) +#endif +extern unsigned long vm_map_base; +#endif /* __ASSEMBLY__ */ + +#ifndef IO_BASE +#define IO_BASE CSR_DMW0_BASE +#endif + +#ifndef CACHE_BASE +#define CACHE_BASE CSR_DMW1_BASE +#endif + +#ifndef UNCACHE_BASE +#define UNCACHE_BASE CSR_DMW0_BASE +#endif + +#define DMW_PABITS 48 +#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1) + +/* + * Memory above this physical address will be considered highmem. + */ +#ifndef HIGHMEM_START +#define HIGHMEM_START (_UL(1) << _UL(DMW_PABITS)) +#endif + +#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK)) +#define TO_CACHE(x) (CACHE_BASE | ((x) & TO_PHYS_MASK)) +#define TO_UNCACHE(x) (UNCACHE_BASE | ((x) & TO_PHYS_MASK)) + +/* + * This handles the memory map. + */ +#ifndef PAGE_OFFSET +#define PAGE_OFFSET (CACHE_BASE + PHYS_OFFSET) +#endif + +#ifndef FIXADDR_TOP +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) +#endif + +#ifdef __ASSEMBLY__ +#define _ATYPE_ +#define _ATYPE32_ +#define _ATYPE64_ +#else +#define _ATYPE_ __PTRDIFF_TYPE__ +#define _ATYPE32_ int +#define _ATYPE64_ __s64 +#endif + +#ifdef CONFIG_64BIT +#define _CONST64_(x) _UL(x) +#else +#define _CONST64_(x) _ULL(x) +#endif + +/* + * 32/64-bit LoongArch address spaces + */ +#ifdef __ASSEMBLY__ +#define _ACAST32_ +#define _ACAST64_ +#else +#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */ +#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */ +#endif + +#ifdef CONFIG_32BIT + +#define UVRANGE 0x00000000 +#define KPRANGE0 0x80000000 +#define KPRANGE1 0xa0000000 +#define KVRANGE 0xc0000000 + +#else + +#define XUVRANGE _CONST64_(0x0000000000000000) +#define XSPRANGE _CONST64_(0x4000000000000000) +#define XKPRANGE _CONST64_(0x8000000000000000) +#define XKVRANGE _CONST64_(0xc000000000000000) + +#endif + +/* + * Returns the physical address of a KPRANGEx / XKPRANGE address + */ +#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK) + +/* + * On LoongArch, I/O ports mappring is following: + * + * | .... | + * |-----------------------| + * | pci io ports(16K~32M) | + * |-----------------------| + * | isa io ports(0 ~16K) | + * PCI_IOBASE ->|-----------------------| + * | .... | + */ +#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE))) +#define PCI_IOSIZE SZ_32M +#define ISA_IOSIZE SZ_16K +#define IO_SPACE_LIMIT (PCI_IOSIZE - 1) + +#define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) + +#endif /* _ASM_ADDRSPACE_H */ diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h new file mode 100644 index 0000000000..ff3d10ac39 --- /dev/null +++ b/arch/loongarch/include/asm/alternative-asm.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ALTERNATIVE_ASM_H +#define _ASM_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +#include <asm/asm.h> + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . 
+ .short \feature + .byte \orig_len + .byte \alt_len +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".fill" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature +140 : + \oldinstr +141 : + .fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000 +142 : + + .pushsection .altinstructions, "a" + altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f + .popsection + + .subsection 1 +143 : + \newinstr +144 : + .previous +.endm + +#define old_len (141b-140b) +#define new_len1 (144f-143f) +#define new_len2 (145f-144f) + +#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) + +/* + * Same as ALTERNATIVE macro above but for two alternatives. If CPU + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has + * @feature2, it replaces @oldinstr with @feature2. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 +140 : + \oldinstr +141 : + .fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000 +142 : + + .pushsection .altinstructions, "a" + altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f, 142b-141b + altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f, 142b-141b + .popsection + + .subsection 1 +143 : + \newinstr1 +144 : + \newinstr2 +145 : + .previous +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h new file mode 100644 index 0000000000..cee7b29785 --- /dev/null +++ b/arch/loongarch/include/asm/alternative.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ALTERNATIVE_H +#define _ASM_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/stringify.h> +#include <asm/asm.h> + +struct alt_instr { + s32 instr_offset; /* offset to original instruction */ + s32 replace_offset; /* offset to replacement instruction */ + u16 feature; /* feature bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction */ +} __packed; + +/* + * Debug flag that can be tested to see whether alternative + * instructions were patched in already: + */ +extern int alternatives_patched; +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +#define b_replacement(num) "664"#num +#define e_replacement(num) "665"#num + +#define alt_end_marker "663" +#define alt_slen "662b-661b" +#define alt_total_slen alt_end_marker"b-661b" +#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" + +#define __OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ + "((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n" + +#define OLDINSTR(oldinstr, num) \ + __OLDINSTR(oldinstr, num) \ + alt_end_marker ":\n" + +#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))" + +/* + * Pad the second replacement alternative with additional NOPs if it is + * additionally longer than the first replacement alternative. 
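The alt_max_short() helper above is the classic branchless integer max. The extra inner negation is there because the expression is evaluated by the assembler, where a true comparison yields -1 rather than C's 1. A minimal standalone C sketch of the same identity, using C booleans (illustration only, not part of this patch):

/* Branchless max, as used by alt_max_short() above, checked with C's 0/1
 * comparison results; the in-kernel string version adds one more negation
 * because gas evaluates a true comparison to -1. */
#include <assert.h>

static unsigned int max_branchless(unsigned int a, unsigned int b)
{
	return a ^ ((a ^ b) & -(unsigned int)(a < b));
}

int main(void)
{
	assert(max_branchless(4, 12) == 12);	/* a <  b: mask is all ones, picks b */
	assert(max_branchless(12, 4) == 12);	/* a >= b: mask is zero, keeps a     */
	assert(max_branchless(8, 8) == 8);
	return 0;
}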
+ */ +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, " \ + "4, 0x03400000\n" \ + alt_end_marker ":\n" + +#define ALTINSTR_ENTRY(feature, num) \ + " .long 661b - .\n" /* label */ \ + " .long " b_replacement(num)"f - .\n" /* new instruction */ \ + " .short " __stringify(feature) "\n" /* feature bit */ \ + " .byte " alt_total_slen "\n" /* source len */ \ + " .byte " alt_rlen(num) "\n" /* replacement len */ + +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, newinstr, feature) \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature, 1) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ".previous\n" + +#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature1, 1) \ + ALTINSTR_ENTRY(feature2, 2) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ".previous\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
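To make the intended use of these wrappers concrete, a hypothetical caller could look like the sketch below. The feature constant is a made-up placeholder and the instruction pair is only an example; neither comes from this patch:

/* Hypothetical use of the alternative() macro defined just below: emit a
 * plain "nop" by default and let apply_alternatives() patch in "dbar 0"
 * on CPUs that advertise the (made-up) HYPOTHETICAL_CPU_FEATURE bit. */
#include <asm/alternative.h>

static inline void maybe_barrier(void)
{
	alternative("nop", "dbar 0", HYPOTHETICAL_CPU_FEATURE);
}

Since LoongArch instructions are fixed at 4 bytes, the NOP padding described above only comes into play when one side of an alternative contains more instructions than the other.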
+ */ +#define alternative(oldinstr, newinstr, feature) \ + (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")) + +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ + (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_H */ diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h new file mode 100644 index 0000000000..df05005f2b --- /dev/null +++ b/arch/loongarch/include/asm/asm-extable.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_ASM_EXTABLE_H +#define __ASM_ASM_EXTABLE_H + +#define EX_TYPE_NONE 0 +#define EX_TYPE_FIXUP 1 +#define EX_TYPE_UACCESS_ERR_ZERO 2 +#define EX_TYPE_BPF 3 + +#ifdef __ASSEMBLY__ + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + .pushsection __ex_table, "a"; \ + .balign 4; \ + .long ((insn) - .); \ + .long ((fixup) - .); \ + .short (type); \ + .short (data); \ + .popsection; + + .macro _asm_extable, insn, fixup + __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) + .endm + +#else /* __ASSEMBLY__ */ + +#include <linux/bits.h> +#include <linux/stringify.h> +#include <asm/gpr-num.h> + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + ".pushsection __ex_table, \"a\"\n" \ + ".balign 4\n" \ + ".long ((" insn ") - .)\n" \ + ".long ((" fixup ") - .)\n" \ + ".short (" type ")\n" \ + ".short (" data ")\n" \ + ".popsection\n" + +#define _ASM_EXTABLE(insn, fixup) \ + __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0") + +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(4, 0) +#define EX_DATA_REG_ZERO_SHIFT 5 +#define EX_DATA_REG_ZERO GENMASK(9, 5) + +#define EX_DATA_REG(reg, gpr) \ + "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" + +#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ + "(" \ + EX_DATA_REG(ERR, err) " | " \ + EX_DATA_REG(ZERO, zero) \ + ")") + +#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/loongarch/include/asm/asm-offsets.h b/arch/loongarch/include/asm/asm-offsets.h new file mode 100644 index 0000000000..d9ad88d293 --- /dev/null +++ b/arch/loongarch/include/asm/asm-offsets.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <generated/asm-offsets.h> diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h new file mode 100644 index 0000000000..cf8e1a4e7c --- /dev/null +++ b/arch/loongarch/include/asm/asm-prototypes.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/uaccess.h> +#include <asm/fpu.h> +#include <asm/lbt.h> +#include <asm/mmu_context.h> +#include <asm/page.h> +#include <asm/ftrace.h> +#include <asm-generic/asm-prototypes.h> diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h new file mode 100644 index 0000000000..f591b3245d --- /dev/null +++ b/arch/loongarch/include/asm/asm.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Some useful macros for LoongArch assembler code + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1995, 1996, 
1997, 1999, 2001 by Ralf Baechle + * Copyright (C) 1999 by Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2002 Maciej W. Rozycki + */ +#ifndef __ASM_ASM_H +#define __ASM_ASM_H + +/* LoongArch pref instruction. */ +#ifdef CONFIG_CPU_HAS_PREFETCH + +#define PREF(hint, addr, offs) \ + preld hint, addr, offs; \ + +#define PREFX(hint, addr, index) \ + preldx hint, addr, index; \ + +#else /* !CONFIG_CPU_HAS_PREFETCH */ + +#define PREF(hint, addr, offs) +#define PREFX(hint, addr, index) + +#endif /* !CONFIG_CPU_HAS_PREFETCH */ + +/* + * Stack alignment + */ +#define STACK_ALIGN ~(0xf) + +/* + * Macros to handle different pointer/register sizes for 32/64-bit code + */ + +/* + * Size of a register + */ +#ifndef __loongarch64 +#define SZREG 4 +#else +#define SZREG 8 +#endif + +/* + * Use the following macros in assemblercode to load/store registers, + * pointers etc. + */ +#if (SZREG == 4) +#define REG_L ld.w +#define REG_S st.w +#define REG_ADD add.w +#define REG_SUB sub.w +#else /* SZREG == 8 */ +#define REG_L ld.d +#define REG_S st.d +#define REG_ADD add.d +#define REG_SUB sub.d +#endif + +/* + * How to add/sub/load/store/shift C int variables. + */ +#if (__SIZEOF_INT__ == 4) +#define INT_ADD add.w +#define INT_ADDI addi.w +#define INT_SUB sub.w +#define INT_L ld.w +#define INT_S st.w +#define INT_SLL slli.w +#define INT_SLLV sll.w +#define INT_SRL srli.w +#define INT_SRLV srl.w +#define INT_SRA srai.w +#define INT_SRAV sra.w +#endif + +#if (__SIZEOF_INT__ == 8) +#define INT_ADD add.d +#define INT_ADDI addi.d +#define INT_SUB sub.d +#define INT_L ld.d +#define INT_S st.d +#define INT_SLL slli.d +#define INT_SLLV sll.d +#define INT_SRL srli.d +#define INT_SRLV srl.d +#define INT_SRA srai.d +#define INT_SRAV sra.d +#endif + +/* + * How to add/sub/load/store/shift C long variables. + */ +#if (__SIZEOF_LONG__ == 4) +#define LONG_ADD add.w +#define LONG_ADDI addi.w +#define LONG_SUB sub.w +#define LONG_L ld.w +#define LONG_S st.w +#define LONG_SLL slli.w +#define LONG_SLLV sll.w +#define LONG_SRL srli.w +#define LONG_SRLV srl.w +#define LONG_SRA srai.w +#define LONG_SRAV sra.w + +#ifdef __ASSEMBLY__ +#define LONG .word +#endif +#define LONGSIZE 4 +#define LONGMASK 3 +#define LONGLOG 2 +#endif + +#if (__SIZEOF_LONG__ == 8) +#define LONG_ADD add.d +#define LONG_ADDI addi.d +#define LONG_SUB sub.d +#define LONG_L ld.d +#define LONG_S st.d +#define LONG_SLL slli.d +#define LONG_SLLV sll.d +#define LONG_SRL srli.d +#define LONG_SRLV srl.d +#define LONG_SRA srai.d +#define LONG_SRAV sra.d + +#ifdef __ASSEMBLY__ +#define LONG .dword +#endif +#define LONGSIZE 8 +#define LONGMASK 7 +#define LONGLOG 3 +#endif + +/* + * How to add/sub/load/store/shift pointers. 
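The pointer-size blocks that follow also define PTR_SCALESHIFT, the shift that turns an array index into a byte offset. A C analogue of how assembly code uses it when walking an array of pointers (illustration only, not part of this patch):

/* C analogue of indexing a pointer array the way .S code does with
 * PTR_SCALESHIFT: shift the index by log2(sizeof(void *)) and add the
 * result to the base. Equivalent to table[idx]. */
void *fetch_entry(void **table, unsigned long idx)
{
	const unsigned long scaleshift = (sizeof(void *) == 8) ? 3 : 2;

	return *(void **)((char *)table + (idx << scaleshift));
}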
+ */ +#if (__SIZEOF_POINTER__ == 4) +#define PTR_ADD add.w +#define PTR_ADDI addi.w +#define PTR_SUB sub.w +#define PTR_L ld.w +#define PTR_S st.w +#define PTR_LI li.w +#define PTR_SLL slli.w +#define PTR_SLLV sll.w +#define PTR_SRL srli.w +#define PTR_SRLV srl.w +#define PTR_SRA srai.w +#define PTR_SRAV sra.w + +#define PTR_SCALESHIFT 2 + +#ifdef __ASSEMBLY__ +#define PTR .word +#endif +#define PTRSIZE 4 +#define PTRLOG 2 +#endif + +#if (__SIZEOF_POINTER__ == 8) +#define PTR_ADD add.d +#define PTR_ADDI addi.d +#define PTR_SUB sub.d +#define PTR_L ld.d +#define PTR_S st.d +#define PTR_LI li.d +#define PTR_SLL slli.d +#define PTR_SLLV sll.d +#define PTR_SRL srli.d +#define PTR_SRLV srl.d +#define PTR_SRA srai.d +#define PTR_SRAV sra.d + +#define PTR_SCALESHIFT 3 + +#ifdef __ASSEMBLY__ +#define PTR .dword +#endif +#define PTRSIZE 8 +#define PTRLOG 3 +#endif + +/* Annotate a function as being unsuitable for kprobes. */ +#ifdef CONFIG_KPROBES +#define _ASM_NOKPROBE(name) \ + .pushsection "_kprobe_blacklist", "aw"; \ + .quad name; \ + .popsection +#else +#define _ASM_NOKPROBE(name) +#endif + +#endif /* __ASM_ASM_H */ diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h new file mode 100644 index 0000000000..655db7d7a4 --- /dev/null +++ b/arch/loongarch/include/asm/asmmacro.h @@ -0,0 +1,618 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ASMMACRO_H +#define _ASM_ASMMACRO_H + +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/fpregdef.h> +#include <asm/loongarch.h> + + .macro cpu_save_nonscratch thread + stptr.d s0, \thread, THREAD_REG23 + stptr.d s1, \thread, THREAD_REG24 + stptr.d s2, \thread, THREAD_REG25 + stptr.d s3, \thread, THREAD_REG26 + stptr.d s4, \thread, THREAD_REG27 + stptr.d s5, \thread, THREAD_REG28 + stptr.d s6, \thread, THREAD_REG29 + stptr.d s7, \thread, THREAD_REG30 + stptr.d s8, \thread, THREAD_REG31 + stptr.d sp, \thread, THREAD_REG03 + stptr.d fp, \thread, THREAD_REG22 + .endm + + .macro cpu_restore_nonscratch thread + ldptr.d s0, \thread, THREAD_REG23 + ldptr.d s1, \thread, THREAD_REG24 + ldptr.d s2, \thread, THREAD_REG25 + ldptr.d s3, \thread, THREAD_REG26 + ldptr.d s4, \thread, THREAD_REG27 + ldptr.d s5, \thread, THREAD_REG28 + ldptr.d s6, \thread, THREAD_REG29 + ldptr.d s7, \thread, THREAD_REG30 + ldptr.d s8, \thread, THREAD_REG31 + ldptr.d ra, \thread, THREAD_REG01 + ldptr.d sp, \thread, THREAD_REG03 + ldptr.d fp, \thread, THREAD_REG22 + .endm + + .macro fpu_save_csr thread tmp + movfcsr2gr \tmp, fcsr0 + stptr.w \tmp, \thread, THREAD_FCSR +#ifdef CONFIG_CPU_HAS_LBT + /* TM bit is always 0 if LBT not supported */ + andi \tmp, \tmp, FPU_CSR_TM + beqz \tmp, 1f + /* Save FTOP */ + x86mftop \tmp + stptr.w \tmp, \thread, THREAD_FTOP + /* Turn off TM to ensure the order of FPR in memory independent of TM */ + x86clrtm +1: +#endif + .endm + + .macro fpu_restore_csr thread tmp0 tmp1 + ldptr.w \tmp0, \thread, THREAD_FCSR + movgr2fcsr fcsr0, \tmp0 +#ifdef CONFIG_CPU_HAS_LBT + /* TM bit is always 0 if LBT not supported */ + andi \tmp0, \tmp0, FPU_CSR_TM + beqz \tmp0, 2f + /* Restore FTOP */ + ldptr.w \tmp0, \thread, THREAD_FTOP + andi \tmp0, \tmp0, 0x7 + la.pcrel \tmp1, 1f + alsl.d \tmp1, \tmp0, \tmp1, 3 + jr \tmp1 +1: + x86mttop 0 + b 2f + x86mttop 1 + b 2f + x86mttop 2 + b 2f + x86mttop 3 + b 2f + x86mttop 4 + b 2f + x86mttop 5 + b 2f + x86mttop 6 + b 2f + x86mttop 7 +2: +#endif + .endm + + .macro fpu_save_cc thread tmp0 tmp1 + movcf2gr 
\tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.d \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.d \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.d \tmp1, \tmp0, 31, 24 + movcf2gr \tmp0, $fcc4 + bstrins.d \tmp1, \tmp0, 39, 32 + movcf2gr \tmp0, $fcc5 + bstrins.d \tmp1, \tmp0, 47, 40 + movcf2gr \tmp0, $fcc6 + bstrins.d \tmp1, \tmp0, 55, 48 + movcf2gr \tmp0, $fcc7 + bstrins.d \tmp1, \tmp0, 63, 56 + stptr.d \tmp1, \thread, THREAD_FCC + .endm + + .macro fpu_restore_cc thread tmp0 tmp1 + ldptr.d \tmp0, \thread, THREAD_FCC + bstrpick.d \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.d \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.d \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.d \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + bstrpick.d \tmp1, \tmp0, 39, 32 + movgr2cf $fcc4, \tmp1 + bstrpick.d \tmp1, \tmp0, 47, 40 + movgr2cf $fcc5, \tmp1 + bstrpick.d \tmp1, \tmp0, 55, 48 + movgr2cf $fcc6, \tmp1 + bstrpick.d \tmp1, \tmp0, 63, 56 + movgr2cf $fcc7, \tmp1 + .endm + + .macro fpu_save_double thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \tmp, \thread + fst.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0 + fst.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0 + fst.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0 + fst.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0 + fst.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0 + fst.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0 + fst.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0 + fst.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0 + fst.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0 + fst.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0 + fst.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0 + fst.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0 + fst.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0 + fst.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0 + fst.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0 + fst.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0 + fst.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0 + fst.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0 + fst.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0 + fst.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0 + fst.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0 + fst.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0 + fst.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0 + fst.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0 + fst.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0 + fst.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0 + fst.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0 + fst.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0 + fst.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0 + fst.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0 + fst.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0 + fst.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro fpu_restore_double thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \tmp, \thread + fld.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0 + fld.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0 + fld.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0 + fld.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0 + fld.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0 + fld.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0 + fld.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0 + fld.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0 + fld.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0 + fld.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0 + fld.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0 + fld.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0 + fld.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0 + fld.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0 + fld.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0 + fld.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0 + fld.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0 + fld.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0 + fld.d $f18, \tmp, 
THREAD_FPR18 - THREAD_FPR0 + fld.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0 + fld.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0 + fld.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0 + fld.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0 + fld.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0 + fld.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0 + fld.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0 + fld.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0 + fld.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0 + fld.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0 + fld.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0 + fld.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0 + fld.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro lsx_save_data thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \thread, \tmp + vst $vr0, \tmp, THREAD_FPR0 - THREAD_FPR0 + vst $vr1, \tmp, THREAD_FPR1 - THREAD_FPR0 + vst $vr2, \tmp, THREAD_FPR2 - THREAD_FPR0 + vst $vr3, \tmp, THREAD_FPR3 - THREAD_FPR0 + vst $vr4, \tmp, THREAD_FPR4 - THREAD_FPR0 + vst $vr5, \tmp, THREAD_FPR5 - THREAD_FPR0 + vst $vr6, \tmp, THREAD_FPR6 - THREAD_FPR0 + vst $vr7, \tmp, THREAD_FPR7 - THREAD_FPR0 + vst $vr8, \tmp, THREAD_FPR8 - THREAD_FPR0 + vst $vr9, \tmp, THREAD_FPR9 - THREAD_FPR0 + vst $vr10, \tmp, THREAD_FPR10 - THREAD_FPR0 + vst $vr11, \tmp, THREAD_FPR11 - THREAD_FPR0 + vst $vr12, \tmp, THREAD_FPR12 - THREAD_FPR0 + vst $vr13, \tmp, THREAD_FPR13 - THREAD_FPR0 + vst $vr14, \tmp, THREAD_FPR14 - THREAD_FPR0 + vst $vr15, \tmp, THREAD_FPR15 - THREAD_FPR0 + vst $vr16, \tmp, THREAD_FPR16 - THREAD_FPR0 + vst $vr17, \tmp, THREAD_FPR17 - THREAD_FPR0 + vst $vr18, \tmp, THREAD_FPR18 - THREAD_FPR0 + vst $vr19, \tmp, THREAD_FPR19 - THREAD_FPR0 + vst $vr20, \tmp, THREAD_FPR20 - THREAD_FPR0 + vst $vr21, \tmp, THREAD_FPR21 - THREAD_FPR0 + vst $vr22, \tmp, THREAD_FPR22 - THREAD_FPR0 + vst $vr23, \tmp, THREAD_FPR23 - THREAD_FPR0 + vst $vr24, \tmp, THREAD_FPR24 - THREAD_FPR0 + vst $vr25, \tmp, THREAD_FPR25 - THREAD_FPR0 + vst $vr26, \tmp, THREAD_FPR26 - THREAD_FPR0 + vst $vr27, \tmp, THREAD_FPR27 - THREAD_FPR0 + vst $vr28, \tmp, THREAD_FPR28 - THREAD_FPR0 + vst $vr29, \tmp, THREAD_FPR29 - THREAD_FPR0 + vst $vr30, \tmp, THREAD_FPR30 - THREAD_FPR0 + vst $vr31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro lsx_restore_data thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \thread, \tmp + vld $vr0, \tmp, THREAD_FPR0 - THREAD_FPR0 + vld $vr1, \tmp, THREAD_FPR1 - THREAD_FPR0 + vld $vr2, \tmp, THREAD_FPR2 - THREAD_FPR0 + vld $vr3, \tmp, THREAD_FPR3 - THREAD_FPR0 + vld $vr4, \tmp, THREAD_FPR4 - THREAD_FPR0 + vld $vr5, \tmp, THREAD_FPR5 - THREAD_FPR0 + vld $vr6, \tmp, THREAD_FPR6 - THREAD_FPR0 + vld $vr7, \tmp, THREAD_FPR7 - THREAD_FPR0 + vld $vr8, \tmp, THREAD_FPR8 - THREAD_FPR0 + vld $vr9, \tmp, THREAD_FPR9 - THREAD_FPR0 + vld $vr10, \tmp, THREAD_FPR10 - THREAD_FPR0 + vld $vr11, \tmp, THREAD_FPR11 - THREAD_FPR0 + vld $vr12, \tmp, THREAD_FPR12 - THREAD_FPR0 + vld $vr13, \tmp, THREAD_FPR13 - THREAD_FPR0 + vld $vr14, \tmp, THREAD_FPR14 - THREAD_FPR0 + vld $vr15, \tmp, THREAD_FPR15 - THREAD_FPR0 + vld $vr16, \tmp, THREAD_FPR16 - THREAD_FPR0 + vld $vr17, \tmp, THREAD_FPR17 - THREAD_FPR0 + vld $vr18, \tmp, THREAD_FPR18 - THREAD_FPR0 + vld $vr19, \tmp, THREAD_FPR19 - THREAD_FPR0 + vld $vr20, \tmp, THREAD_FPR20 - THREAD_FPR0 + vld $vr21, \tmp, THREAD_FPR21 - THREAD_FPR0 + vld $vr22, \tmp, THREAD_FPR22 - THREAD_FPR0 + vld $vr23, \tmp, THREAD_FPR23 - THREAD_FPR0 + vld $vr24, \tmp, THREAD_FPR24 - THREAD_FPR0 + vld $vr25, \tmp, THREAD_FPR25 - THREAD_FPR0 + vld $vr26, \tmp, THREAD_FPR26 - THREAD_FPR0 + vld $vr27, \tmp, THREAD_FPR27 - 
THREAD_FPR0 + vld $vr28, \tmp, THREAD_FPR28 - THREAD_FPR0 + vld $vr29, \tmp, THREAD_FPR29 - THREAD_FPR0 + vld $vr30, \tmp, THREAD_FPR30 - THREAD_FPR0 + vld $vr31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro lsx_save_all thread tmp0 tmp1 + fpu_save_cc \thread, \tmp0, \tmp1 + fpu_save_csr \thread, \tmp0 + lsx_save_data \thread, \tmp0 + .endm + + .macro lsx_restore_all thread tmp0 tmp1 + lsx_restore_data \thread, \tmp0 + fpu_restore_cc \thread, \tmp0, \tmp1 + fpu_restore_csr \thread, \tmp0, \tmp1 + .endm + + .macro lsx_save_upper vd base tmp off + vpickve2gr.d \tmp, \vd, 1 + st.d \tmp, \base, (\off+8) + .endm + + .macro lsx_save_all_upper thread base tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \base, \thread, \tmp + lsx_save_upper $vr0, \base, \tmp, (THREAD_FPR0-THREAD_FPR0) + lsx_save_upper $vr1, \base, \tmp, (THREAD_FPR1-THREAD_FPR0) + lsx_save_upper $vr2, \base, \tmp, (THREAD_FPR2-THREAD_FPR0) + lsx_save_upper $vr3, \base, \tmp, (THREAD_FPR3-THREAD_FPR0) + lsx_save_upper $vr4, \base, \tmp, (THREAD_FPR4-THREAD_FPR0) + lsx_save_upper $vr5, \base, \tmp, (THREAD_FPR5-THREAD_FPR0) + lsx_save_upper $vr6, \base, \tmp, (THREAD_FPR6-THREAD_FPR0) + lsx_save_upper $vr7, \base, \tmp, (THREAD_FPR7-THREAD_FPR0) + lsx_save_upper $vr8, \base, \tmp, (THREAD_FPR8-THREAD_FPR0) + lsx_save_upper $vr9, \base, \tmp, (THREAD_FPR9-THREAD_FPR0) + lsx_save_upper $vr10, \base, \tmp, (THREAD_FPR10-THREAD_FPR0) + lsx_save_upper $vr11, \base, \tmp, (THREAD_FPR11-THREAD_FPR0) + lsx_save_upper $vr12, \base, \tmp, (THREAD_FPR12-THREAD_FPR0) + lsx_save_upper $vr13, \base, \tmp, (THREAD_FPR13-THREAD_FPR0) + lsx_save_upper $vr14, \base, \tmp, (THREAD_FPR14-THREAD_FPR0) + lsx_save_upper $vr15, \base, \tmp, (THREAD_FPR15-THREAD_FPR0) + lsx_save_upper $vr16, \base, \tmp, (THREAD_FPR16-THREAD_FPR0) + lsx_save_upper $vr17, \base, \tmp, (THREAD_FPR17-THREAD_FPR0) + lsx_save_upper $vr18, \base, \tmp, (THREAD_FPR18-THREAD_FPR0) + lsx_save_upper $vr19, \base, \tmp, (THREAD_FPR19-THREAD_FPR0) + lsx_save_upper $vr20, \base, \tmp, (THREAD_FPR20-THREAD_FPR0) + lsx_save_upper $vr21, \base, \tmp, (THREAD_FPR21-THREAD_FPR0) + lsx_save_upper $vr22, \base, \tmp, (THREAD_FPR22-THREAD_FPR0) + lsx_save_upper $vr23, \base, \tmp, (THREAD_FPR23-THREAD_FPR0) + lsx_save_upper $vr24, \base, \tmp, (THREAD_FPR24-THREAD_FPR0) + lsx_save_upper $vr25, \base, \tmp, (THREAD_FPR25-THREAD_FPR0) + lsx_save_upper $vr26, \base, \tmp, (THREAD_FPR26-THREAD_FPR0) + lsx_save_upper $vr27, \base, \tmp, (THREAD_FPR27-THREAD_FPR0) + lsx_save_upper $vr28, \base, \tmp, (THREAD_FPR28-THREAD_FPR0) + lsx_save_upper $vr29, \base, \tmp, (THREAD_FPR29-THREAD_FPR0) + lsx_save_upper $vr30, \base, \tmp, (THREAD_FPR30-THREAD_FPR0) + lsx_save_upper $vr31, \base, \tmp, (THREAD_FPR31-THREAD_FPR0) + .endm + + .macro lsx_restore_upper vd base tmp off + ld.d \tmp, \base, (\off+8) + vinsgr2vr.d \vd, \tmp, 1 + .endm + + .macro lsx_restore_all_upper thread base tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \base, \thread, \tmp + lsx_restore_upper $vr0, \base, \tmp, (THREAD_FPR0-THREAD_FPR0) + lsx_restore_upper $vr1, \base, \tmp, (THREAD_FPR1-THREAD_FPR0) + lsx_restore_upper $vr2, \base, \tmp, (THREAD_FPR2-THREAD_FPR0) + lsx_restore_upper $vr3, \base, \tmp, (THREAD_FPR3-THREAD_FPR0) + lsx_restore_upper $vr4, \base, \tmp, (THREAD_FPR4-THREAD_FPR0) + lsx_restore_upper $vr5, \base, \tmp, (THREAD_FPR5-THREAD_FPR0) + lsx_restore_upper $vr6, \base, \tmp, (THREAD_FPR6-THREAD_FPR0) + lsx_restore_upper $vr7, \base, \tmp, (THREAD_FPR7-THREAD_FPR0) + lsx_restore_upper $vr8, \base, \tmp, 
(THREAD_FPR8-THREAD_FPR0) + lsx_restore_upper $vr9, \base, \tmp, (THREAD_FPR9-THREAD_FPR0) + lsx_restore_upper $vr10, \base, \tmp, (THREAD_FPR10-THREAD_FPR0) + lsx_restore_upper $vr11, \base, \tmp, (THREAD_FPR11-THREAD_FPR0) + lsx_restore_upper $vr12, \base, \tmp, (THREAD_FPR12-THREAD_FPR0) + lsx_restore_upper $vr13, \base, \tmp, (THREAD_FPR13-THREAD_FPR0) + lsx_restore_upper $vr14, \base, \tmp, (THREAD_FPR14-THREAD_FPR0) + lsx_restore_upper $vr15, \base, \tmp, (THREAD_FPR15-THREAD_FPR0) + lsx_restore_upper $vr16, \base, \tmp, (THREAD_FPR16-THREAD_FPR0) + lsx_restore_upper $vr17, \base, \tmp, (THREAD_FPR17-THREAD_FPR0) + lsx_restore_upper $vr18, \base, \tmp, (THREAD_FPR18-THREAD_FPR0) + lsx_restore_upper $vr19, \base, \tmp, (THREAD_FPR19-THREAD_FPR0) + lsx_restore_upper $vr20, \base, \tmp, (THREAD_FPR20-THREAD_FPR0) + lsx_restore_upper $vr21, \base, \tmp, (THREAD_FPR21-THREAD_FPR0) + lsx_restore_upper $vr22, \base, \tmp, (THREAD_FPR22-THREAD_FPR0) + lsx_restore_upper $vr23, \base, \tmp, (THREAD_FPR23-THREAD_FPR0) + lsx_restore_upper $vr24, \base, \tmp, (THREAD_FPR24-THREAD_FPR0) + lsx_restore_upper $vr25, \base, \tmp, (THREAD_FPR25-THREAD_FPR0) + lsx_restore_upper $vr26, \base, \tmp, (THREAD_FPR26-THREAD_FPR0) + lsx_restore_upper $vr27, \base, \tmp, (THREAD_FPR27-THREAD_FPR0) + lsx_restore_upper $vr28, \base, \tmp, (THREAD_FPR28-THREAD_FPR0) + lsx_restore_upper $vr29, \base, \tmp, (THREAD_FPR29-THREAD_FPR0) + lsx_restore_upper $vr30, \base, \tmp, (THREAD_FPR30-THREAD_FPR0) + lsx_restore_upper $vr31, \base, \tmp, (THREAD_FPR31-THREAD_FPR0) + .endm + + .macro lsx_init_upper vd tmp + vinsgr2vr.d \vd, \tmp, 1 + .endm + + .macro lsx_init_all_upper tmp + not \tmp, zero + lsx_init_upper $vr0 \tmp + lsx_init_upper $vr1 \tmp + lsx_init_upper $vr2 \tmp + lsx_init_upper $vr3 \tmp + lsx_init_upper $vr4 \tmp + lsx_init_upper $vr5 \tmp + lsx_init_upper $vr6 \tmp + lsx_init_upper $vr7 \tmp + lsx_init_upper $vr8 \tmp + lsx_init_upper $vr9 \tmp + lsx_init_upper $vr10 \tmp + lsx_init_upper $vr11 \tmp + lsx_init_upper $vr12 \tmp + lsx_init_upper $vr13 \tmp + lsx_init_upper $vr14 \tmp + lsx_init_upper $vr15 \tmp + lsx_init_upper $vr16 \tmp + lsx_init_upper $vr17 \tmp + lsx_init_upper $vr18 \tmp + lsx_init_upper $vr19 \tmp + lsx_init_upper $vr20 \tmp + lsx_init_upper $vr21 \tmp + lsx_init_upper $vr22 \tmp + lsx_init_upper $vr23 \tmp + lsx_init_upper $vr24 \tmp + lsx_init_upper $vr25 \tmp + lsx_init_upper $vr26 \tmp + lsx_init_upper $vr27 \tmp + lsx_init_upper $vr28 \tmp + lsx_init_upper $vr29 \tmp + lsx_init_upper $vr30 \tmp + lsx_init_upper $vr31 \tmp + .endm + + .macro lasx_save_data thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \thread, \tmp + xvst $xr0, \tmp, THREAD_FPR0 - THREAD_FPR0 + xvst $xr1, \tmp, THREAD_FPR1 - THREAD_FPR0 + xvst $xr2, \tmp, THREAD_FPR2 - THREAD_FPR0 + xvst $xr3, \tmp, THREAD_FPR3 - THREAD_FPR0 + xvst $xr4, \tmp, THREAD_FPR4 - THREAD_FPR0 + xvst $xr5, \tmp, THREAD_FPR5 - THREAD_FPR0 + xvst $xr6, \tmp, THREAD_FPR6 - THREAD_FPR0 + xvst $xr7, \tmp, THREAD_FPR7 - THREAD_FPR0 + xvst $xr8, \tmp, THREAD_FPR8 - THREAD_FPR0 + xvst $xr9, \tmp, THREAD_FPR9 - THREAD_FPR0 + xvst $xr10, \tmp, THREAD_FPR10 - THREAD_FPR0 + xvst $xr11, \tmp, THREAD_FPR11 - THREAD_FPR0 + xvst $xr12, \tmp, THREAD_FPR12 - THREAD_FPR0 + xvst $xr13, \tmp, THREAD_FPR13 - THREAD_FPR0 + xvst $xr14, \tmp, THREAD_FPR14 - THREAD_FPR0 + xvst $xr15, \tmp, THREAD_FPR15 - THREAD_FPR0 + xvst $xr16, \tmp, THREAD_FPR16 - THREAD_FPR0 + xvst $xr17, \tmp, THREAD_FPR17 - THREAD_FPR0 + xvst $xr18, \tmp, THREAD_FPR18 - THREAD_FPR0 + 
xvst $xr19, \tmp, THREAD_FPR19 - THREAD_FPR0 + xvst $xr20, \tmp, THREAD_FPR20 - THREAD_FPR0 + xvst $xr21, \tmp, THREAD_FPR21 - THREAD_FPR0 + xvst $xr22, \tmp, THREAD_FPR22 - THREAD_FPR0 + xvst $xr23, \tmp, THREAD_FPR23 - THREAD_FPR0 + xvst $xr24, \tmp, THREAD_FPR24 - THREAD_FPR0 + xvst $xr25, \tmp, THREAD_FPR25 - THREAD_FPR0 + xvst $xr26, \tmp, THREAD_FPR26 - THREAD_FPR0 + xvst $xr27, \tmp, THREAD_FPR27 - THREAD_FPR0 + xvst $xr28, \tmp, THREAD_FPR28 - THREAD_FPR0 + xvst $xr29, \tmp, THREAD_FPR29 - THREAD_FPR0 + xvst $xr30, \tmp, THREAD_FPR30 - THREAD_FPR0 + xvst $xr31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro lasx_restore_data thread tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \tmp, \thread, \tmp + xvld $xr0, \tmp, THREAD_FPR0 - THREAD_FPR0 + xvld $xr1, \tmp, THREAD_FPR1 - THREAD_FPR0 + xvld $xr2, \tmp, THREAD_FPR2 - THREAD_FPR0 + xvld $xr3, \tmp, THREAD_FPR3 - THREAD_FPR0 + xvld $xr4, \tmp, THREAD_FPR4 - THREAD_FPR0 + xvld $xr5, \tmp, THREAD_FPR5 - THREAD_FPR0 + xvld $xr6, \tmp, THREAD_FPR6 - THREAD_FPR0 + xvld $xr7, \tmp, THREAD_FPR7 - THREAD_FPR0 + xvld $xr8, \tmp, THREAD_FPR8 - THREAD_FPR0 + xvld $xr9, \tmp, THREAD_FPR9 - THREAD_FPR0 + xvld $xr10, \tmp, THREAD_FPR10 - THREAD_FPR0 + xvld $xr11, \tmp, THREAD_FPR11 - THREAD_FPR0 + xvld $xr12, \tmp, THREAD_FPR12 - THREAD_FPR0 + xvld $xr13, \tmp, THREAD_FPR13 - THREAD_FPR0 + xvld $xr14, \tmp, THREAD_FPR14 - THREAD_FPR0 + xvld $xr15, \tmp, THREAD_FPR15 - THREAD_FPR0 + xvld $xr16, \tmp, THREAD_FPR16 - THREAD_FPR0 + xvld $xr17, \tmp, THREAD_FPR17 - THREAD_FPR0 + xvld $xr18, \tmp, THREAD_FPR18 - THREAD_FPR0 + xvld $xr19, \tmp, THREAD_FPR19 - THREAD_FPR0 + xvld $xr20, \tmp, THREAD_FPR20 - THREAD_FPR0 + xvld $xr21, \tmp, THREAD_FPR21 - THREAD_FPR0 + xvld $xr22, \tmp, THREAD_FPR22 - THREAD_FPR0 + xvld $xr23, \tmp, THREAD_FPR23 - THREAD_FPR0 + xvld $xr24, \tmp, THREAD_FPR24 - THREAD_FPR0 + xvld $xr25, \tmp, THREAD_FPR25 - THREAD_FPR0 + xvld $xr26, \tmp, THREAD_FPR26 - THREAD_FPR0 + xvld $xr27, \tmp, THREAD_FPR27 - THREAD_FPR0 + xvld $xr28, \tmp, THREAD_FPR28 - THREAD_FPR0 + xvld $xr29, \tmp, THREAD_FPR29 - THREAD_FPR0 + xvld $xr30, \tmp, THREAD_FPR30 - THREAD_FPR0 + xvld $xr31, \tmp, THREAD_FPR31 - THREAD_FPR0 + .endm + + .macro lasx_save_all thread tmp0 tmp1 + fpu_save_cc \thread, \tmp0, \tmp1 + fpu_save_csr \thread, \tmp0 + lasx_save_data \thread, \tmp0 + .endm + + .macro lasx_restore_all thread tmp0 tmp1 + lasx_restore_data \thread, \tmp0 + fpu_restore_cc \thread, \tmp0, \tmp1 + fpu_restore_csr \thread, \tmp0, \tmp1 + .endm + + .macro lasx_save_upper xd base tmp off + /* Nothing */ + .endm + + .macro lasx_save_all_upper thread base tmp + /* Nothing */ + .endm + + .macro lasx_restore_upper xd base tmp0 tmp1 off + vld \tmp0, \base, (\off+16) + xvpermi.q \xd, \tmp1, 0x2 + .endm + + .macro lasx_restore_all_upper thread base tmp + li.w \tmp, THREAD_FPR0 + PTR_ADD \base, \thread, \tmp + /* Save $vr31 ($xr31 lower bits) with xvpickve2gr */ + xvpickve2gr.d $r17, $xr31, 0 + xvpickve2gr.d $r18, $xr31, 1 + lasx_restore_upper $xr0, \base, $vr31, $xr31, (THREAD_FPR0-THREAD_FPR0) + lasx_restore_upper $xr1, \base, $vr31, $xr31, (THREAD_FPR1-THREAD_FPR0) + lasx_restore_upper $xr2, \base, $vr31, $xr31, (THREAD_FPR2-THREAD_FPR0) + lasx_restore_upper $xr3, \base, $vr31, $xr31, (THREAD_FPR3-THREAD_FPR0) + lasx_restore_upper $xr4, \base, $vr31, $xr31, (THREAD_FPR4-THREAD_FPR0) + lasx_restore_upper $xr5, \base, $vr31, $xr31, (THREAD_FPR5-THREAD_FPR0) + lasx_restore_upper $xr6, \base, $vr31, $xr31, (THREAD_FPR6-THREAD_FPR0) + lasx_restore_upper $xr7, \base, 
$vr31, $xr31, (THREAD_FPR7-THREAD_FPR0) + lasx_restore_upper $xr8, \base, $vr31, $xr31, (THREAD_FPR8-THREAD_FPR0) + lasx_restore_upper $xr9, \base, $vr31, $xr31, (THREAD_FPR9-THREAD_FPR0) + lasx_restore_upper $xr10, \base, $vr31, $xr31, (THREAD_FPR10-THREAD_FPR0) + lasx_restore_upper $xr11, \base, $vr31, $xr31, (THREAD_FPR11-THREAD_FPR0) + lasx_restore_upper $xr12, \base, $vr31, $xr31, (THREAD_FPR12-THREAD_FPR0) + lasx_restore_upper $xr13, \base, $vr31, $xr31, (THREAD_FPR13-THREAD_FPR0) + lasx_restore_upper $xr14, \base, $vr31, $xr31, (THREAD_FPR14-THREAD_FPR0) + lasx_restore_upper $xr15, \base, $vr31, $xr31, (THREAD_FPR15-THREAD_FPR0) + lasx_restore_upper $xr16, \base, $vr31, $xr31, (THREAD_FPR16-THREAD_FPR0) + lasx_restore_upper $xr17, \base, $vr31, $xr31, (THREAD_FPR17-THREAD_FPR0) + lasx_restore_upper $xr18, \base, $vr31, $xr31, (THREAD_FPR18-THREAD_FPR0) + lasx_restore_upper $xr19, \base, $vr31, $xr31, (THREAD_FPR19-THREAD_FPR0) + lasx_restore_upper $xr20, \base, $vr31, $xr31, (THREAD_FPR20-THREAD_FPR0) + lasx_restore_upper $xr21, \base, $vr31, $xr31, (THREAD_FPR21-THREAD_FPR0) + lasx_restore_upper $xr22, \base, $vr31, $xr31, (THREAD_FPR22-THREAD_FPR0) + lasx_restore_upper $xr23, \base, $vr31, $xr31, (THREAD_FPR23-THREAD_FPR0) + lasx_restore_upper $xr24, \base, $vr31, $xr31, (THREAD_FPR24-THREAD_FPR0) + lasx_restore_upper $xr25, \base, $vr31, $xr31, (THREAD_FPR25-THREAD_FPR0) + lasx_restore_upper $xr26, \base, $vr31, $xr31, (THREAD_FPR26-THREAD_FPR0) + lasx_restore_upper $xr27, \base, $vr31, $xr31, (THREAD_FPR27-THREAD_FPR0) + lasx_restore_upper $xr28, \base, $vr31, $xr31, (THREAD_FPR28-THREAD_FPR0) + lasx_restore_upper $xr29, \base, $vr31, $xr31, (THREAD_FPR29-THREAD_FPR0) + lasx_restore_upper $xr30, \base, $vr31, $xr31, (THREAD_FPR30-THREAD_FPR0) + lasx_restore_upper $xr31, \base, $vr31, $xr31, (THREAD_FPR31-THREAD_FPR0) + /* Restore $vr31 ($xr31 lower bits) with xvinsgr2vr */ + xvinsgr2vr.d $xr31, $r17, 0 + xvinsgr2vr.d $xr31, $r18, 1 + .endm + + .macro lasx_init_upper xd tmp + xvinsgr2vr.d \xd, \tmp, 2 + xvinsgr2vr.d \xd, \tmp, 3 + .endm + + .macro lasx_init_all_upper tmp + not \tmp, zero + lasx_init_upper $xr0 \tmp + lasx_init_upper $xr1 \tmp + lasx_init_upper $xr2 \tmp + lasx_init_upper $xr3 \tmp + lasx_init_upper $xr4 \tmp + lasx_init_upper $xr5 \tmp + lasx_init_upper $xr6 \tmp + lasx_init_upper $xr7 \tmp + lasx_init_upper $xr8 \tmp + lasx_init_upper $xr9 \tmp + lasx_init_upper $xr10 \tmp + lasx_init_upper $xr11 \tmp + lasx_init_upper $xr12 \tmp + lasx_init_upper $xr13 \tmp + lasx_init_upper $xr14 \tmp + lasx_init_upper $xr15 \tmp + lasx_init_upper $xr16 \tmp + lasx_init_upper $xr17 \tmp + lasx_init_upper $xr18 \tmp + lasx_init_upper $xr19 \tmp + lasx_init_upper $xr20 \tmp + lasx_init_upper $xr21 \tmp + lasx_init_upper $xr22 \tmp + lasx_init_upper $xr23 \tmp + lasx_init_upper $xr24 \tmp + lasx_init_upper $xr25 \tmp + lasx_init_upper $xr26 \tmp + lasx_init_upper $xr27 \tmp + lasx_init_upper $xr28 \tmp + lasx_init_upper $xr29 \tmp + lasx_init_upper $xr30 \tmp + lasx_init_upper $xr31 \tmp + .endm + +.macro not dst src + nor \dst, \src, zero +.endm + +.macro la_abs reg, sym +#ifndef CONFIG_RELOCATABLE + la.abs \reg, \sym +#else + 766: + lu12i.w \reg, 0 + ori \reg, \reg, 0 + lu32i.d \reg, 0 + lu52i.d \reg, \reg, 0 + .pushsection ".la_abs", "aw", %progbits + .dword 766b + .dword \sym + .popsection +#endif +.endm + +#endif /* _ASM_ASMMACRO_H */ diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h new file mode 100644 index 0000000000..e27f0c72d3 --- 
/dev/null +++ b/arch/loongarch/include/asm/atomic.h @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ATOMIC_H +#define _ASM_ATOMIC_H + +#include <linux/types.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> + +#if __SIZEOF_LONG__ == 4 +#define __LL "ll.w " +#define __SC "sc.w " +#define __AMADD "amadd.w " +#define __AMAND_DB "amand_db.w " +#define __AMOR_DB "amor_db.w " +#define __AMXOR_DB "amxor_db.w " +#elif __SIZEOF_LONG__ == 8 +#define __LL "ll.d " +#define __SC "sc.d " +#define __AMADD "amadd.d " +#define __AMAND_DB "amand_db.d " +#define __AMOR_DB "amor_db.d " +#define __AMXOR_DB "amxor_db.d " +#endif + +#define ATOMIC_INIT(i) { (i) } + +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op, I, asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(add, i, add, +) +ATOMIC_OPS(sub, -i, add, +) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: ll.w %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " add.w %[rc], %[p], %[a]\n" + " sc.w %[rc], %[c]\n" + " beqz %[rc], 0b\n" + " b 2f\n" + "1:\n" + __WEAK_LLSC_MB + "2:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), + [c]"=ZB" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + + return prev; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless + +static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) +{ + int result; + int temp; + + if (__builtin_constant_p(i)) { + __asm__ __volatile__( + "1: ll.w %1, %2 # atomic_sub_if_positive\n" + " addi.w %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + 
__WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "I" (-i)); + } else { + __asm__ __volatile__( + "1: ll.w %1, %2 # atomic_sub_if_positive\n" + " sub.w %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "r" (i)); + } + + return result; +} + +#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v) + +#ifdef CONFIG_64BIT + +#define ATOMIC64_INIT(i) { (i) } + +#define arch_atomic64_read(v) READ_ONCE((v)->counter) +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +#define ATOMIC64_OP(op, I, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \ +{ \ + long result; \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC64_FETCH_OP(op, I, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC64_OPS(op, I, asm_op, c_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op) + +ATOMIC64_OPS(add, i, add, +) +ATOMIC64_OPS(sub, -i, add, +) + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + +#undef ATOMIC64_OPS + +#define ATOMIC64_OPS(op, I, asm_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op) + +ATOMIC64_OPS(and, i, and) +ATOMIC64_OPS(or, i, or) +ATOMIC64_OPS(xor, i, xor) + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long prev, rc; + + __asm__ __volatile__ ( + "0: ll.d %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " add.d %[rc], %[p], %[a]\n" + " sc.d %[rc], %[c]\n" + " beqz %[rc], 0b\n" + " b 2f\n" + "1:\n" + __WEAK_LLSC_MB + "2:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), + [c] "=ZB" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + + return prev; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless + +static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v) +{ + long result; + long temp; + + if (__builtin_constant_p(i)) { + __asm__ __volatile__( + "1: ll.d %1, %2 # atomic64_sub_if_positive \n" + " addi.d %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.d %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "I" (-i)); + } else { + __asm__ __volatile__( + "1: ll.d %1, %2 # 
atomic64_sub_if_positive \n" + " sub.d %0, %1, %3 \n" + " move %1, %0 \n" + " bltz %0, 2f \n" + " sc.d %1, %2 \n" + " beqz %1, 1b \n" + "2: \n" + __WEAK_LLSC_MB + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) + : "r" (i)); + } + + return result; +} + +#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v) + +#endif /* CONFIG_64BIT */ + +#endif /* _ASM_ATOMIC_H */ diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h new file mode 100644 index 0000000000..4b663f1977 --- /dev/null +++ b/arch/loongarch/include/asm/barrier.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * Hint encoding: + * + * Bit4: ordering or completion (0: completion, 1: ordering) + * Bit3: barrier for previous read (0: true, 1: false) + * Bit2: barrier for previous write (0: true, 1: false) + * Bit1: barrier for succeeding read (0: true, 1: false) + * Bit0: barrier for succeeding write (0: true, 1: false) + * + * Hint 0x700: barrier for "read after read" from the same address + */ + +#define DBAR(hint) __asm__ __volatile__("dbar %0 " : : "I"(hint) : "memory") + +#define crwrw 0b00000 +#define cr_r_ 0b00101 +#define c_w_w 0b01010 + +#define orwrw 0b10000 +#define or_r_ 0b10101 +#define o_w_w 0b11010 + +#define orw_w 0b10010 +#define or_rw 0b10100 + +#define c_sync() DBAR(crwrw) +#define c_rsync() DBAR(cr_r_) +#define c_wsync() DBAR(c_w_w) + +#define o_sync() DBAR(orwrw) +#define o_rsync() DBAR(or_r_) +#define o_wsync() DBAR(o_w_w) + +#define ldacq_mb() DBAR(or_rw) +#define strel_mb() DBAR(orw_w) + +#define mb() c_sync() +#define rmb() c_rsync() +#define wmb() c_wsync() +#define iob() c_sync() +#define wbflush() c_sync() + +#define __smp_mb() o_sync() +#define __smp_rmb() o_rsync() +#define __smp_wmb() o_wsync() + +#ifdef CONFIG_SMP +#define __WEAK_LLSC_MB " dbar 0x700 \n" +#else +#define __WEAK_LLSC_MB " \n" +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +/** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * Returns: + * 0 - (@index < @size) + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + __asm__ __volatile__( + "sltu %0, %1, %2\n\t" +#if (__SIZEOF_LONG__ == 4) + "sub.w %0, $zero, %0\n\t" +#elif (__SIZEOF_LONG__ == 8) + "sub.d %0, $zero, %0\n\t" +#endif + : "=r" (mask) + : "r" (index), "r" (size) + :); + + return mask; +} + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + ldacq_mb(); \ + ___p1; \ +}) + +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + strel_mb(); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_store_mb(p, v) \ +do { \ + union { typeof(p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(p)) (v) }; \ + unsigned long __tmp; \ + switch (sizeof(p)) { \ + case 1: \ + *(volatile __u8 *)&p = *(__u8 *)__u.__c; \ + __smp_mb(); \ + break; \ + case 2: \ + *(volatile __u16 *)&p = *(__u16 *)__u.__c; \ + __smp_mb(); \ + break; \ + case 4: \ + __asm__ __volatile__( \ + "amswap_db.w %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u32 *)__u.__c) \ 
+ : ); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "amswap_db.d %[tmp], %[val], %[mem] \n" \ + : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \ + : [val] "r" (*(__u64 *)__u.__c) \ + : ); \ + break; \ + } \ +} while (0) + +#include <asm-generic/barrier.h> + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h new file mode 100644 index 0000000000..69e00f8d80 --- /dev/null +++ b/arch/loongarch/include/asm/bitops.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BITOPS_H +#define _ASM_BITOPS_H + +#include <linux/compiler.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm/barrier.h> + +#include <asm-generic/bitops/builtin-ffs.h> +#include <asm-generic/bitops/builtin-fls.h> +#include <asm-generic/bitops/builtin-__ffs.h> +#include <asm-generic/bitops/builtin-__fls.h> + +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls64.h> + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> + +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/lock.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* _ASM_BITOPS_H */ diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h new file mode 100644 index 0000000000..46f275b9cd --- /dev/null +++ b/arch/loongarch/include/asm/bitrev.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __LOONGARCH_ASM_BITREV_H__ +#define __LOONGARCH_ASM_BITREV_H__ + +#include <linux/swab.h> + +static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) +{ + u32 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x))); + return ret; +} + +static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) +{ + u16 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab16(x))); + return ret; +} + +static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) +{ + u8 ret; + + asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(x)); + return ret; +} + +#endif /* __LOONGARCH_ASM_BITREV_H__ */ diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h new file mode 100644 index 0000000000..c60796869b --- /dev/null +++ b/arch/loongarch/include/asm/bootinfo.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BOOTINFO_H +#define _ASM_BOOTINFO_H + +#include <linux/types.h> +#include <asm/setup.h> + +const char *get_system_type(void); + +extern void init_environ(void); +extern void memblock_init(void); +extern void platform_init(void); +extern int __init init_numa_memory(void); + +struct loongson_board_info { + int bios_size; + const char *bios_vendor; + const char *bios_version; + const char *bios_release_date; + const char *board_name; + const char *board_vendor; +}; + +struct loongson_system_configuration { + int nr_cpus; + int nr_nodes; + int boot_cpu_id; + int cores_per_node; + int cores_per_package; + unsigned long cores_io_master; + unsigned long suspend_addr; + const char *cpuname; +}; + +extern u64 efi_system_table; +extern unsigned long fw_arg0, fw_arg1, fw_arg2; +extern struct loongson_board_info b_info; +extern struct 
loongson_system_configuration loongson_sysconf; + +static inline bool io_master(int cpu) +{ + return test_bit(cpu, &loongson_sysconf.cores_io_master); +} + +#endif /* _ASM_BOOTINFO_H */ diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h new file mode 100644 index 0000000000..9a133e4c06 --- /dev/null +++ b/arch/loongarch/include/asm/branch.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BRANCH_H +#define _ASM_BRANCH_H + +#include <asm/ptrace.h> + +static inline unsigned long exception_era(struct pt_regs *regs) +{ + return regs->csr_era; +} + +static inline void compute_return_era(struct pt_regs *regs) +{ + regs->csr_era += 4; +} + +#endif /* _ASM_BRANCH_H */ diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h new file mode 100644 index 0000000000..d4ca3ba254 --- /dev/null +++ b/arch/loongarch/include/asm/bug.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BUG_H +#define __ASM_BUG_H + +#include <asm/break.h> +#include <linux/stringify.h> + +#ifndef CONFIG_DEBUG_BUGVERBOSE +#define _BUGVERBOSE_LOCATION(file, line) +#else +#define __BUGVERBOSE_LOCATION(file, line) \ + .pushsection .rodata.str, "aMS", @progbits, 1; \ + 10002: .string file; \ + .popsection; \ + \ + .long 10002b - .; \ + .short line; +#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) +#endif + +#ifndef CONFIG_GENERIC_BUG +#define __BUG_ENTRY(flags) +#else +#define __BUG_ENTRY(flags) \ + .pushsection __bug_table, "aw"; \ + .align 2; \ + 10000: .long 10001f - .; \ + _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ + .short flags; \ + .popsection; \ + 10001: +#endif + +#define ASM_BUG_FLAGS(flags) \ + __BUG_ENTRY(flags) \ + break BRK_BUG + +#define ASM_BUG() ASM_BUG_FLAGS(0) + +#define __BUG_FLAGS(flags) \ + asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags))); + +#define __WARN_FLAGS(flags) \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ + instrumentation_end(); \ +} while (0) + +#define BUG() \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(0); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG + +#include <asm-generic/bug.h> + +#endif /* __ASM_BUG_H */ diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h new file mode 100644 index 0000000000..1b6d096171 --- /dev/null +++ b/arch/loongarch/include/asm/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CACHE_H +#define _ASM_CACHE_H + +#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define __read_mostly __section(".data..read_mostly") + +#endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h new file mode 100644 index 0000000000..80bd741069 --- /dev/null +++ b/arch/loongarch/include/asm/cacheflush.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CACHEFLUSH_H +#define _ASM_CACHEFLUSH_H + +#include <linux/mm.h> +#include <asm/cpu-info.h> +#include <asm/cacheops.h> + +static inline bool cache_present(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRESENT; +} + +static inline bool cache_private(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRIVATE; +} + +static 
inline bool cache_inclusive(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_INCLUSIVE; +} + +static inline unsigned int cpu_last_level_cache_line_size(void) +{ + int cache_present = boot_cpu_data.cache_leaves_present; + + return boot_cpu_data.cache_leaves[cache_present - 1].linesz; +} + +asmlinkage void __flush_cache_all(void); +void local_flush_icache_range(unsigned long start, unsigned long end); + +#define flush_icache_range local_flush_icache_range +#define flush_icache_user_range local_flush_icache_range + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 + +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) +#define flush_icache_user_page(vma, page, addr, len) do { } while (0) +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define cache_op(op, addr) \ + __asm__ __volatile__( \ + " cacop %0, %1 \n" \ + : \ + : "i" (op), "ZC" (*(unsigned char *)(addr))) + +static inline void flush_cache_line(int leaf, unsigned long addr) +{ + switch (leaf) { + case Cache_LEAF0: + cache_op(Index_Writeback_Inv_LEAF0, addr); + break; + case Cache_LEAF1: + cache_op(Index_Writeback_Inv_LEAF1, addr); + break; + case Cache_LEAF2: + cache_op(Index_Writeback_Inv_LEAF2, addr); + break; + case Cache_LEAF3: + cache_op(Index_Writeback_Inv_LEAF3, addr); + break; + case Cache_LEAF4: + cache_op(Index_Writeback_Inv_LEAF4, addr); + break; + case Cache_LEAF5: + cache_op(Index_Writeback_Inv_LEAF5, addr); + break; + default: + break; + } +} + +#include <asm-generic/cacheflush.h> + +#endif /* _ASM_CACHEFLUSH_H */ diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h new file mode 100644 index 0000000000..0f4a86f8e2 --- /dev/null +++ b/arch/loongarch/include/asm/cacheops.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache operations for the cache instruction. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CACHEOPS_H +#define __ASM_CACHEOPS_H + +/* + * Most cache ops are split into a 3 bit field identifying the cache, and a 2 + * bit field identifying the cache operation. 
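+ *
+ * As an illustration of the encoding, Index_Writeback_Inv_LEAF1 below is
+ * Cache_LEAF1 (0x01) in the cache field combined with Index_Writeback_Inv
+ * (0x08) in the op field, giving 0x09.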
+ */ +#define CacheOp_Cache 0x07 +#define CacheOp_Op 0x18 + +#define Cache_LEAF0 0x00 +#define Cache_LEAF1 0x01 +#define Cache_LEAF2 0x02 +#define Cache_LEAF3 0x03 +#define Cache_LEAF4 0x04 +#define Cache_LEAF5 0x05 + +#define Index_Invalidate 0x08 +#define Index_Writeback_Inv 0x08 +#define Hit_Invalidate 0x10 +#define Hit_Writeback_Inv 0x10 +#define CacheOp_User_Defined 0x18 + +#define Index_Writeback_Inv_LEAF0 (Cache_LEAF0 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF1 (Cache_LEAF1 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF2 (Cache_LEAF2 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF3 (Cache_LEAF3 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF4 (Cache_LEAF4 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF5 (Cache_LEAF5 | Index_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF0 (Cache_LEAF0 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF1 (Cache_LEAF1 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF2 (Cache_LEAF2 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF3 (Cache_LEAF3 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF4 (Cache_LEAF4 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF5 (Cache_LEAF5 | Hit_Writeback_Inv) + +#endif /* __ASM_CACHEOPS_H */ diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include/asm/checksum.h new file mode 100644 index 0000000000..cabbf6af44 --- /dev/null +++ b/arch/loongarch/include/asm/checksum.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 ARM Ltd. + * Copyright (C) 2023 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CHECKSUM_H +#define __ASM_CHECKSUM_H + +#include <linux/bitops.h> +#include <linux/in6.h> + +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); + +/* + * turns a 32-bit partial checksum (e.g. from csum_partial) into a + * 1's complement 16-bit checksum. + */ +static inline __sum16 csum_fold(__wsum sum) +{ + u32 tmp = (__force u32)sum; + + /* + * swap the two 16-bit halves of sum + * if there is a carry from adding the two 16-bit halves, + * it will carry from the lower half into the upper half, + * giving us the correct sum in the upper half. + */ + return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16); +} +#define csum_fold csum_fold + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. ihl is the number + * of 32-bit words and is always >= 5. 
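+ *
+ * The implementation below folds the first four words (16 bytes) of the
+ * header with a single 128-bit load and then adds any remaining option
+ * words one at a time, so the minimal 20-byte header needs only one extra
+ * 32-bit load.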
+ */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + u64 sum; + __uint128_t tmp; + int n = ihl; /* we want it signed */ + + tmp = *(const __uint128_t *)iph; + iph += 16; + n -= 4; + tmp += ((tmp >> 64) | (tmp << 64)); + sum = tmp >> 64; + do { + sum += *(const u32 *)iph; + iph += 4; + } while (--n > 0); + + sum += ror64(sum, 32); + return csum_fold((__force __wsum)(sum >> 32)); +} +#define ip_fast_csum ip_fast_csum + +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + +#include <asm-generic/checksum.h> + +#endif /* __ASM_CHECKSUM_H */ diff --git a/arch/loongarch/include/asm/clocksource.h b/arch/loongarch/include/asm/clocksource.h new file mode 100644 index 0000000000..58e64aa05d --- /dev/null +++ b/arch/loongarch/include/asm/clocksource.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_CLOCKSOURCE_H +#define __ASM_CLOCKSOURCE_H + +#include <asm/vdso/clocksource.h> + +#endif /* __ASM_CLOCKSOURCE_H */ diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h new file mode 100644 index 0000000000..979fde61bb --- /dev/null +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CMPXCHG_H +#define __ASM_CMPXCHG_H + +#include <linux/bits.h> +#include <linux/build_bug.h> +#include <asm/barrier.h> + +#define __xchg_asm(amswap_db, m, val) \ +({ \ + __typeof(val) __ret; \ + \ + __asm__ __volatile__ ( \ + " "amswap_db" %1, %z2, %0 \n" \ + : "+ZB" (*m), "=&r" (__ret) \ + : "Jr" (val) \ + : "memory"); \ + \ + __ret; \ +}) + +static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val, + unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask value to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + val &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * exchange within the naturally aligned 4 byte integerthat includes + * it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. 
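+ *
+ * For example, a one-byte exchange at an address with (ptr & 0x3) == 2
+ * yields shift = 16 and mask = 0x00ff0000, so only bits 23:16 of the
+ * aligned word take part in the ll.w/sc.w sequence below.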
+ */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z5 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift) + : "memory"); + + return (old32 & mask) >> shift; +} + +static __always_inline unsigned long +__arch_xchg(volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + case 2: + return __xchg_small(ptr, x, size); + + case 4: + return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); + + case 8: + return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); + + default: + BUILD_BUG(); + } + + return 0; +} + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __res; \ + \ + __res = (__typeof__(*(ptr))) \ + __arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \ + \ + __res; \ +}) + +#define __cmpxchg_asm(ld, st, m, old, new) \ +({ \ + __typeof(old) __ret; \ + \ + __asm__ __volatile__( \ + "1: " ld " %0, %2 # __cmpxchg_asm \n" \ + " bne %0, %z3, 2f \n" \ + " move $t0, %z4 \n" \ + " " st " $t0, %1 \n" \ + " beqz $t0, 1b \n" \ + "2: \n" \ + __WEAK_LLSC_MB \ + : "=&r" (__ret), "=ZB"(*m) \ + : "ZB"(*m), "Jr" (old), "Jr" (new) \ + : "t0", "memory"); \ + \ + __ret; \ +}) + +static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old, + unsigned int new, unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask inputs to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + old &= mask; + new &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * compare & exchange within the naturally aligned 4 byte integer + * that includes it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + old <<= shift; + new <<= shift; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. 
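+ *
+ * Note that the ll.w/sc.w loop below bails out to the barrier as soon as
+ * the masked value differs from the expected old value, so a failed
+ * compare never issues a store-conditional.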
+ */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " and %1, %0, %z4 \n" + " bne %1, %z5, 2f \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z6 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + " b 3f \n" + "2: \n" + __WEAK_LLSC_MB + "3: \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new) + : "memory"); + + return (old32 & mask) >> shift; +} + +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) +{ + switch (size) { + case 1: + case 2: + return __cmpxchg_small(ptr, old, new, size); + + case 4: + return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr, + (u32)old, new); + + case 8: + return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr, + (u64)old, new); + + default: + BUILD_BUG(); + } + + return 0; +} + +#define arch_cmpxchg_local(ptr, old, new) \ + ((__typeof__(*(ptr))) \ + __cmpxchg((ptr), \ + (unsigned long)(__typeof__(*(ptr)))(old), \ + (unsigned long)(__typeof__(*(ptr)))(new), \ + sizeof(*(ptr)))) + +#define arch_cmpxchg(ptr, old, new) \ +({ \ + __typeof__(*(ptr)) __res; \ + \ + __res = arch_cmpxchg_local((ptr), (old), (new)); \ + \ + __res; \ +}) + +#ifdef CONFIG_64BIT +#define arch_cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ + }) + +#define arch_cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ + }) +#else +#include <asm-generic/cmpxchg-local.h> +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) +#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n)) +#endif + +#endif /* __ASM_CMPXCHG_H */ diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h new file mode 100644 index 0000000000..2eafe6a6ac --- /dev/null +++ b/arch/loongarch/include/asm/cpu-features.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + */ +#ifndef __ASM_CPU_FEATURES_H +#define __ASM_CPU_FEATURES_H + +#include <asm/cpu.h> +#include <asm/cpu-info.h> + +#define cpu_opt(opt) (cpu_data[0].options & (opt)) +#define cpu_has(feat) (cpu_data[0].options & BIT_ULL(feat)) + +#define cpu_has_loongarch (cpu_has_loongarch32 | cpu_has_loongarch64) +#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT) +#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) + +#ifdef CONFIG_32BIT +# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) +# define cpu_vabits 31 +# define cpu_pabits 31 +#endif + +#ifdef CONFIG_64BIT +# define cpu_has_64bits 1 +# define cpu_vabits cpu_data[0].vabits +# define cpu_pabits cpu_data[0].pabits +# define __NEED_ADDRBITS_PROBE +#endif + +/* + * SMP assumption: Options of CPU 0 are a superset of all processors. + * This is true for all known LoongArch systems. 
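+ *
+ * The cpu_has_* macros below therefore only consult cpu_data[0], so a
+ * plain "if (cpu_has_lsx)" is valid on any CPU without referring to the
+ * current processor.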
+ */ +#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG) +#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM) +#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL) +#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU) +#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX) +#define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX) +#define cpu_has_crc32 cpu_opt(LOONGARCH_CPU_CRC32) +#define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX) +#define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO) +#define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ) +#define cpu_has_lbt_x86 cpu_opt(LOONGARCH_CPU_LBT_X86) +#define cpu_has_lbt_arm cpu_opt(LOONGARCH_CPU_LBT_ARM) +#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS) +#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips) +#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR) +#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB) +#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH) +#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT) +#define cpu_has_csripi cpu_opt(LOONGARCH_CPU_CSRIPI) +#define cpu_has_extioi cpu_opt(LOONGARCH_CPU_EXTIOI) +#define cpu_has_prefetch cpu_opt(LOONGARCH_CPU_PREFETCH) +#define cpu_has_pmp cpu_opt(LOONGARCH_CPU_PMP) +#define cpu_has_perf cpu_opt(LOONGARCH_CPU_PMP) +#define cpu_has_scalefreq cpu_opt(LOONGARCH_CPU_SCALEFREQ) +#define cpu_has_flatmode cpu_opt(LOONGARCH_CPU_FLATMODE) +#define cpu_has_eiodecode cpu_opt(LOONGARCH_CPU_EIODECODE) +#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) +#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) +#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) + +#endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h new file mode 100644 index 0000000000..900589cb15 --- /dev/null +++ b/arch/loongarch/include/asm/cpu-info.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_CPU_INFO_H +#define __ASM_CPU_INFO_H + +#include <linux/cache.h> +#include <linux/types.h> + +#include <asm/loongarch.h> + +/* cache_desc->flags */ +enum { + CACHE_PRESENT = (1 << 0), + CACHE_PRIVATE = (1 << 1), /* core private cache */ + CACHE_INCLUSIVE = (1 << 2), /* include the inner level caches */ +}; + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned char type; + unsigned char level; + unsigned short sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +#define CACHE_LEVEL_MAX 3 +#define CACHE_LEAVES_MAX 6 + +struct cpuinfo_loongarch { + u64 asid_cache; + unsigned long asid_mask; + + /* + * Capability and feature descriptor structure for LoongArch CPU + */ + unsigned long long options; + unsigned int processor_id; + unsigned int fpu_vers; + unsigned int fpu_csr0; + unsigned int fpu_mask; + unsigned int cputype; + int isa_level; + int tlbsize; + int tlbsizemtlb; + int tlbsizestlbsets; + int tlbsizestlbways; + int cache_leaves_present; /* number of cache_leaves[] elements */ + struct cache_desc cache_leaves[CACHE_LEAVES_MAX]; + int core; /* physical core number in package */ + int package;/* physical package number */ + int global_id; /* physical global thread number */ + int vabits; /* Virtual Address size in bits */ + int pabits; /* Physical Address size in bits */ + unsigned int ksave_mask; /* Usable KSave mask. 
*/ + unsigned int watch_dreg_count; /* Number data breakpoints */ + unsigned int watch_ireg_count; /* Number instruction breakpoints */ + unsigned int watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */ +} __aligned(SMP_CACHE_BYTES); + +extern struct cpuinfo_loongarch cpu_data[]; +#define boot_cpu_data cpu_data[0] +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] + +extern void cpu_probe(void); + +extern const char *__cpu_family[]; +extern const char *__cpu_full_name[]; +#define cpu_family_string() __cpu_family[raw_smp_processor_id()] +#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()] + +struct seq_file; +struct notifier_block; + +extern int register_proc_cpuinfo_notifier(struct notifier_block *nb); +extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v); + +#define proc_cpuinfo_notifier(fn, pri) \ +({ \ + static struct notifier_block fn##_nb = { \ + .notifier_call = fn, \ + .priority = pri \ + }; \ + \ + register_proc_cpuinfo_notifier(&fn##_nb); \ +}) + +struct proc_cpuinfo_notifier_args { + struct seq_file *m; + unsigned long n; +}; + +static inline bool cpus_are_siblings(int cpua, int cpub) +{ + struct cpuinfo_loongarch *infoa = &cpu_data[cpua]; + struct cpuinfo_loongarch *infob = &cpu_data[cpub]; + + if (infoa->package != infob->package) + return false; + + if (infoa->core != infob->core) + return false; + + return true; +} + +static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo) +{ + return cpuinfo->asid_mask; +} + +static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo, + unsigned long asid_mask) +{ + cpuinfo->asid_mask = asid_mask; +} + +#endif /* __ASM_CPU_INFO_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h new file mode 100644 index 0000000000..48b9f7168b --- /dev/null +++ b/arch/loongarch/include/asm/cpu.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * cpu.h: Values of the PRID register used to match up + * various LoongArch CPU types. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_CPU_H +#define _ASM_CPU_H + +/* + * As described in LoongArch specs from Loongson Technology, the PRID register + * (CPUCFG.00) has the following layout: + * + * +---------------+----------------+------------+--------------------+ + * | Reserved | Company ID | Series ID | Product ID | + * +---------------+----------------+------------+--------------------+ + * 31 24 23 16 15 12 11 0 + */ + +/* + * Assigned Company values for bits 23:16 of the PRID register. + */ + +#define PRID_COMP_MASK 0xff0000 + +#define PRID_COMP_LOONGSON 0x140000 + +/* + * Assigned Series ID values for bits 15:12 of the PRID register. In order + * to detect a certain CPU type exactly eventually additional registers may + * need to be examined. + */ + +#define PRID_SERIES_MASK 0xf000 + +#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */ +#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */ +#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */ +#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */ +#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */ + +/* + * Particular Product ID values for bits 11:0 of the PRID register. 
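+ *
+ * As a made-up example, a PRID value of 0x14c010 would decode as company
+ * 0x14 (PRID_COMP_LOONGSON), series 0xc000 (PRID_SERIES_LA464) and
+ * product 0x010.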
+ */ + +#define PRID_PRODUCT_MASK 0x0fff + +#if !defined(__ASSEMBLY__) + +enum cpu_type_enum { + CPU_UNKNOWN, + CPU_LOONGSON32, + CPU_LOONGSON64, + CPU_LAST +}; + +#endif /* !__ASSEMBLY */ + +/* + * ISA Level encodings + * + */ + +#define LOONGARCH_CPU_ISA_LA32R 0x00000001 +#define LOONGARCH_CPU_ISA_LA32S 0x00000002 +#define LOONGARCH_CPU_ISA_LA64 0x00000004 + +#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S) +#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64 + +/* + * CPU Option encodings + */ +#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */ +#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */ +#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */ +#define CPU_FEATURE_FPU 3 /* CPU has FPU */ +#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */ +#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */ +#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */ +#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */ +#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */ +#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */ +#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */ +#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */ +#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ +#define CPU_FEATURE_TLB 13 /* CPU has TLB */ +#define CPU_FEATURE_CSR 14 /* CPU has CSR */ +#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ +#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ +#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ +#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ +#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ +#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */ +#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */ +#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ +#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ +#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ +#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ +#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ + +#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) +#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) +#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL) +#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU) +#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX) +#define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX) +#define LOONGARCH_CPU_CRC32 BIT_ULL(CPU_FEATURE_CRC32) +#define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX) +#define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO) +#define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ) +#define LOONGARCH_CPU_LBT_X86 BIT_ULL(CPU_FEATURE_LBT_X86) +#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM) +#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS) +#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB) +#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR) +#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH) +#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT) +#define LOONGARCH_CPU_CSRIPI BIT_ULL(CPU_FEATURE_CSRIPI) +#define LOONGARCH_CPU_EXTIOI BIT_ULL(CPU_FEATURE_EXTIOI) +#define LOONGARCH_CPU_PREFETCH BIT_ULL(CPU_FEATURE_PREFETCH) +#define LOONGARCH_CPU_PMP BIT_ULL(CPU_FEATURE_PMP) +#define LOONGARCH_CPU_SCALEFREQ BIT_ULL(CPU_FEATURE_SCALEFREQ) +#define LOONGARCH_CPU_FLATMODE BIT_ULL(CPU_FEATURE_FLATMODE) +#define LOONGARCH_CPU_EIODECODE 
BIT_ULL(CPU_FEATURE_EIODECODE) +#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) +#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) +#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) + +#endif /* _ASM_CPU_H */ diff --git a/arch/loongarch/include/asm/cpufeature.h b/arch/loongarch/include/asm/cpufeature.h new file mode 100644 index 0000000000..4da22a8e63 --- /dev/null +++ b/arch/loongarch/include/asm/cpufeature.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPU feature definitions for module loading, used by + * module_cpu_feature_match(), see uapi/asm/hwcap.h for LoongArch CPU features. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_CPUFEATURE_H +#define __ASM_CPUFEATURE_H + +#include <uapi/asm/hwcap.h> +#include <asm/elf.h> + +#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) + +#define cpu_feature(x) ilog2(HWCAP_ ## x) + +static inline bool cpu_have_feature(unsigned int num) +{ + return elf_hwcap & (1UL << num); +} + +#endif /* __ASM_CPUFEATURE_H */ diff --git a/arch/loongarch/include/asm/delay.h b/arch/loongarch/include/asm/delay.h new file mode 100644 index 0000000000..36d7751913 --- /dev/null +++ b/arch/loongarch/include/asm/delay.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_DELAY_H +#define _ASM_DELAY_H + +#include <linux/param.h> + +extern void __delay(unsigned long cycles); +extern void __ndelay(unsigned long ns); +extern void __udelay(unsigned long us); + +#define ndelay(ns) __ndelay(ns) +#define udelay(us) __udelay(us) + +/* make sure "usecs *= ..." in udelay do not overflow. */ +#if HZ >= 1000 +#define MAX_UDELAY_MS 1 +#elif HZ <= 200 +#define MAX_UDELAY_MS 5 +#else +#define MAX_UDELAY_MS (1000 / HZ) +#endif + +#endif /* _ASM_DELAY_H */ diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h new file mode 100644 index 0000000000..75ccd808a2 --- /dev/null +++ b/arch/loongarch/include/asm/dma-direct.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _LOONGARCH_DMA_DIRECT_H +#define _LOONGARCH_DMA_DIRECT_H + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); + +#endif /* _LOONGARCH_DMA_DIRECT_H */ diff --git a/arch/loongarch/include/asm/dma.h b/arch/loongarch/include/asm/dma.h new file mode 100644 index 0000000000..1a8866319f --- /dev/null +++ b/arch/loongarch/include/asm/dma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_DMA_H +#define __ASM_DMA_H + +#define MAX_DMA_ADDRESS PAGE_OFFSET +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +#endif diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h new file mode 100644 index 0000000000..6054934177 --- /dev/null +++ b/arch/loongarch/include/asm/dmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_DMI_H +#define _ASM_DMI_H + +#include <linux/io.h> +#include <linux/memblock.h> + +#define dmi_early_remap(x, l) dmi_remap(x, l) +#define dmi_early_unmap(x, l) dmi_unmap(x) +#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE) + +static inline void *dmi_remap(u64 phys_addr, unsigned long size) +{ + return ((void 
*)TO_CACHE(phys_addr)); +} + +static inline void dmi_unmap(void *addr) +{ +} + +#endif /* _ASM_DMI_H */ diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h new file mode 100644 index 0000000000..91d81f9730 --- /dev/null +++ b/arch/loongarch/include/asm/efi.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_EFI_H +#define _ASM_LOONGARCH_EFI_H + +#include <linux/efi.h> + +void __init efi_init(void); +void __init efi_runtime_init(void); +void __init *efi_fdt_pointer(void); +void efifb_setup_from_dmi(struct screen_info *si, const char *opt); + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */ + +#define arch_efi_call_virt_setup() +#define arch_efi_call_virt_teardown() + +#define EFI_ALLOC_ALIGN SZ_64K +#define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE + +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) +{ + return ULONG_MAX; +} + +static inline unsigned long efi_get_kimg_min_align(void) +{ + return SZ_2M; +} + +#define EFI_KIMG_PREFERRED_ADDRESS PHYSADDR(VMLINUX_LOAD_ADDRESS) + +unsigned long kernel_entry_address(unsigned long kernel_addr); + +#endif /* _ASM_LOONGARCH_EFI_H */ diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h new file mode 100644 index 0000000000..f16bd42456 --- /dev/null +++ b/arch/loongarch/include/asm/elf.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_ELF_H +#define _ASM_ELF_H + +#include <linux/auxvec.h> +#include <linux/fs.h> +#include <uapi/linux/elf.h> + +#include <asm/current.h> +#include <asm/vdso.h> + +/* The ABI of a file. 
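+ *
+ * These EF_LOONGARCH_ABI_* values are carried in the ELF header's e_flags
+ * field; a typical LoongArch Linux toolchain targets the LP64 double-float
+ * ABI (0x3).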
*/ +#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT 0x1 +#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT 0x2 +#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT 0x3 + +#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT 0x5 +#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT 0x6 +#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT 0x7 + +/* LoongArch relocation types used by the dynamic linker */ +#define R_LARCH_NONE 0 +#define R_LARCH_32 1 +#define R_LARCH_64 2 +#define R_LARCH_RELATIVE 3 +#define R_LARCH_COPY 4 +#define R_LARCH_JUMP_SLOT 5 +#define R_LARCH_TLS_DTPMOD32 6 +#define R_LARCH_TLS_DTPMOD64 7 +#define R_LARCH_TLS_DTPREL32 8 +#define R_LARCH_TLS_DTPREL64 9 +#define R_LARCH_TLS_TPREL32 10 +#define R_LARCH_TLS_TPREL64 11 +#define R_LARCH_IRELATIVE 12 +#define R_LARCH_MARK_LA 20 +#define R_LARCH_MARK_PCREL 21 +#define R_LARCH_SOP_PUSH_PCREL 22 +#define R_LARCH_SOP_PUSH_ABSOLUTE 23 +#define R_LARCH_SOP_PUSH_DUP 24 +#define R_LARCH_SOP_PUSH_GPREL 25 +#define R_LARCH_SOP_PUSH_TLS_TPREL 26 +#define R_LARCH_SOP_PUSH_TLS_GOT 27 +#define R_LARCH_SOP_PUSH_TLS_GD 28 +#define R_LARCH_SOP_PUSH_PLT_PCREL 29 +#define R_LARCH_SOP_ASSERT 30 +#define R_LARCH_SOP_NOT 31 +#define R_LARCH_SOP_SUB 32 +#define R_LARCH_SOP_SL 33 +#define R_LARCH_SOP_SR 34 +#define R_LARCH_SOP_ADD 35 +#define R_LARCH_SOP_AND 36 +#define R_LARCH_SOP_IF_ELSE 37 +#define R_LARCH_SOP_POP_32_S_10_5 38 +#define R_LARCH_SOP_POP_32_U_10_12 39 +#define R_LARCH_SOP_POP_32_S_10_12 40 +#define R_LARCH_SOP_POP_32_S_10_16 41 +#define R_LARCH_SOP_POP_32_S_10_16_S2 42 +#define R_LARCH_SOP_POP_32_S_5_20 43 +#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2 44 +#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2 45 +#define R_LARCH_SOP_POP_32_U 46 +#define R_LARCH_ADD8 47 +#define R_LARCH_ADD16 48 +#define R_LARCH_ADD24 49 +#define R_LARCH_ADD32 50 +#define R_LARCH_ADD64 51 +#define R_LARCH_SUB8 52 +#define R_LARCH_SUB16 53 +#define R_LARCH_SUB24 54 +#define R_LARCH_SUB32 55 +#define R_LARCH_SUB64 56 +#define R_LARCH_GNU_VTINHERIT 57 +#define R_LARCH_GNU_VTENTRY 58 +#define R_LARCH_B16 64 +#define R_LARCH_B21 65 +#define R_LARCH_B26 66 +#define R_LARCH_ABS_HI20 67 +#define R_LARCH_ABS_LO12 68 +#define R_LARCH_ABS64_LO20 69 +#define R_LARCH_ABS64_HI12 70 +#define R_LARCH_PCALA_HI20 71 +#define R_LARCH_PCALA_LO12 72 +#define R_LARCH_PCALA64_LO20 73 +#define R_LARCH_PCALA64_HI12 74 +#define R_LARCH_GOT_PC_HI20 75 +#define R_LARCH_GOT_PC_LO12 76 +#define R_LARCH_GOT64_PC_LO20 77 +#define R_LARCH_GOT64_PC_HI12 78 +#define R_LARCH_GOT_HI20 79 +#define R_LARCH_GOT_LO12 80 +#define R_LARCH_GOT64_LO20 81 +#define R_LARCH_GOT64_HI12 82 +#define R_LARCH_TLS_LE_HI20 83 +#define R_LARCH_TLS_LE_LO12 84 +#define R_LARCH_TLS_LE64_LO20 85 +#define R_LARCH_TLS_LE64_HI12 86 +#define R_LARCH_TLS_IE_PC_HI20 87 +#define R_LARCH_TLS_IE_PC_LO12 88 +#define R_LARCH_TLS_IE64_PC_LO20 89 +#define R_LARCH_TLS_IE64_PC_HI12 90 +#define R_LARCH_TLS_IE_HI20 91 +#define R_LARCH_TLS_IE_LO12 92 +#define R_LARCH_TLS_IE64_LO20 93 +#define R_LARCH_TLS_IE64_HI12 94 +#define R_LARCH_TLS_LD_PC_HI20 95 +#define R_LARCH_TLS_LD_HI20 96 +#define R_LARCH_TLS_GD_PC_HI20 97 +#define R_LARCH_TLS_GD_HI20 98 +#define R_LARCH_32_PCREL 99 +#define R_LARCH_RELAX 100 +#define R_LARCH_DELETE 101 +#define R_LARCH_ALIGN 102 +#define R_LARCH_PCREL20_S2 103 +#define R_LARCH_CFA 104 +#define R_LARCH_ADD6 105 +#define R_LARCH_SUB6 106 +#define R_LARCH_ADD_ULEB128 107 +#define R_LARCH_SUB_ULEB128 108 +#define R_LARCH_64_PCREL 109 + +#ifndef ELF_ARCH + +/* ELF register definitions */ + +/* + * General purpose have the following registers: + * Register Number + * 
GPRs 32 + * ORIG_A0 1 + * ERA 1 + * BADVADDR 1 + * CRMD 1 + * PRMD 1 + * EUEN 1 + * ECFG 1 + * ESTAT 1 + * Reserved 5 + */ +#define ELF_NGREG 45 + +/* + * Floating point have the following registers: + * Register Number + * FPR 32 + * FCC 1 + * FCSR 1 + */ +#define ELF_NFPREG 34 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs); + +#ifdef CONFIG_32BIT +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch elf32_check_arch + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +#define ELF_CORE_COPY_REGS(dest, regs) \ + loongarch_dump_regs32((u32 *)&(dest), (regs)); + +#endif /* CONFIG_32BIT */ + +#ifdef CONFIG_64BIT +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch elf64_check_arch + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 + +#define ELF_CORE_COPY_REGS(dest, regs) \ + loongarch_dump_regs64((u64 *)&(dest), (regs)); + +#endif /* CONFIG_64BIT */ + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_LOONGARCH + +#endif /* !defined(ELF_ARCH) */ + +#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH) + +#define vmcore_elf32_check_arch loongarch_elf_check_machine +#define vmcore_elf64_check_arch loongarch_elf_check_machine + +/* + * Return non-zero if HDR identifies an 32bit ELF binary. + */ +#define elf32_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (!loongarch_elf_check_machine(__h)) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + \ + __res; \ +}) + +/* + * Return non-zero if HDR identifies an 64bit ELF binary. + */ +#define elf64_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (!loongarch_elf_check_machine(__h)) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS64) \ + __res = 0; \ + \ + __res; \ +}) + +#ifdef CONFIG_32BIT + +#define SET_PERSONALITY2(ex, state) \ +do { \ + current->thread.vdso = &vdso_info; \ + \ + if (personality(current->personality) != PER_LINUX) \ + set_personality(PER_LINUX); \ +} while (0) + +#endif /* CONFIG_32BIT */ + +#ifdef CONFIG_64BIT + +#define SET_PERSONALITY2(ex, state) \ +do { \ + unsigned int p; \ + \ + clear_thread_flag(TIF_32BIT_REGS); \ + clear_thread_flag(TIF_32BIT_ADDR); \ + \ + current->thread.vdso = &vdso_info; \ + \ + p = personality(current->personality); \ + if (p != PER_LINUX32 && p != PER_LINUX) \ + set_personality(PER_LINUX); \ +} while (0) + +#endif /* CONFIG_64BIT */ + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. This could be done in userspace, + * but it's not easy, and we've already done it here. + */ + +#define ELF_HWCAP (elf_hwcap) +extern unsigned int elf_hwcap; +#include <asm/hwcap.h> + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. 
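+ *
+ * The platform string is handed to user space through the AT_PLATFORM
+ * auxiliary vector entry by the generic ELF loader.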
+ */ + +#define ELF_PLATFORM __elf_platform +extern const char *__elf_platform; + +#define ELF_PLAT_INIT(_r, load_addr) do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[29] = _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso); \ +} while (0) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +struct arch_elf_state { + int fp_abi; + int interp_fp_abi; +}; + +#define LOONGARCH_ABI_FP_ANY (0) + +#define INIT_ARCH_ELF_STATE { \ + .fp_abi = LOONGARCH_ABI_FP_ANY, \ + .interp_fp_abi = LOONGARCH_ABI_FP_ANY, \ +} + +extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, + bool is_interp, struct arch_elf_state *state); + +extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr, + struct arch_elf_state *state); + +#endif /* _ASM_ELF_H */ diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h new file mode 100644 index 0000000000..0fe2a098de --- /dev/null +++ b/arch/loongarch/include/asm/entry-common.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H +#define ARCH_LOONGARCH_ENTRY_COMMON_H + +#include <linux/sched.h> +#include <linux/processor.h> + +static inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + +#endif diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h new file mode 100644 index 0000000000..af74a3fdca --- /dev/null +++ b/arch/loongarch/include/asm/exception.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_EXCEPTION_H +#define __ASM_EXCEPTION_H + +#include <asm/ptrace.h> +#include <linux/kprobes.h> + +void show_registers(struct pt_regs *regs); + +asmlinkage void cache_parity_error(void); +asmlinkage void noinstr do_ade(struct pt_regs *regs); +asmlinkage void noinstr do_ale(struct pt_regs *regs); +asmlinkage void noinstr do_bce(struct pt_regs *regs); +asmlinkage void noinstr do_bp(struct pt_regs *regs); +asmlinkage void noinstr do_ri(struct pt_regs *regs); +asmlinkage void noinstr do_fpu(struct pt_regs *regs); +asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr); +asmlinkage void noinstr do_lsx(struct pt_regs *regs); +asmlinkage void noinstr do_lasx(struct pt_regs *regs); +asmlinkage void noinstr do_lbt(struct pt_regs *regs); +asmlinkage void noinstr do_watch(struct pt_regs *regs); +asmlinkage void noinstr do_syscall(struct 
pt_regs *regs); +asmlinkage void noinstr do_reserved(struct pt_regs *regs); +asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp); +asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address); + +asmlinkage void handle_ade(void); +asmlinkage void handle_ale(void); +asmlinkage void handle_bce(void); +asmlinkage void handle_sys(void); +asmlinkage void handle_bp(void); +asmlinkage void handle_ri(void); +asmlinkage void handle_fpu(void); +asmlinkage void handle_fpe(void); +asmlinkage void handle_lsx(void); +asmlinkage void handle_lasx(void); +asmlinkage void handle_lbt(void); +asmlinkage void handle_watch(void); +asmlinkage void handle_reserved(void); +asmlinkage void handle_vint(void); +asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs); + +#endif /* __ASM_EXCEPTION_H */ diff --git a/arch/loongarch/include/asm/exec.h b/arch/loongarch/include/asm/exec.h new file mode 100644 index 0000000000..ba0220812e --- /dev/null +++ b/arch/loongarch/include/asm/exec.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_EXEC_H */ diff --git a/arch/loongarch/include/asm/extable.h b/arch/loongarch/include/asm/extable.h new file mode 100644 index 0000000000..5abf29f1bc --- /dev/null +++ b/arch/loongarch/include/asm/extable.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_EXTABLE_H +#define _ASM_LOONGARCH_EXTABLE_H + +/* + * The exception table consists of pairs of relative offsets: the first + * is the relative offset to an instruction that is allowed to fault, + * and the second is the relative offset at which the program should + * continue. No registers are modified, so it is entirely up to the + * continuation code to figure out what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. 
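+ *
+ * Since insn and fixup are 32-bit offsets relative to the entry itself,
+ * the table is position independent; the absolute address of a fixup is
+ * recovered as (unsigned long)&entry->fixup + entry->fixup.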
+ */ + +struct exception_table_entry { + int insn, fixup; + short type, data; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ +do { \ + (a)->fixup = (b)->fixup + (delta); \ + (b)->fixup = (tmp).fixup - (delta); \ + (a)->type = (b)->type; \ + (b)->type = (tmp).type; \ + (a)->data = (b)->data; \ + (b)->data = (tmp).data; \ +} while (0) + +#ifdef CONFIG_BPF_JIT +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); +#else +static inline +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs) +{ + return false; +} +#endif /* !CONFIG_BPF_JIT */ + +bool fixup_exception(struct pt_regs *regs); + +#endif diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h new file mode 100644 index 0000000000..0b218b10a9 --- /dev/null +++ b/arch/loongarch/include/asm/fb.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ + +#include <linux/compiler.h> +#include <linux/string.h> + +static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) +{ + memcpy(to, (void __force *)from, n); +} +#define fb_memcpy_fromio fb_memcpy_fromio + +static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) +{ + memcpy((void __force *)to, from, n); +} +#define fb_memcpy_toio fb_memcpy_toio + +static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n) +{ + memset((void __force *)addr, c, n); +} +#define fb_memset fb_memset_io + +#include <asm-generic/fb.h> + +#endif /* _ASM_FB_H_ */ diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h new file mode 100644 index 0000000000..d2e55ae55b --- /dev/null +++ b/arch/loongarch/include/asm/fixmap.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fixmap.h: compile-time virtual memory allocation + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#define NR_FIX_BTMAPS 64 + +enum fixed_addresses { + FIX_HOLE, + FIX_EARLYCON_MEM_BASE, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO PAGE_KERNEL_SUC + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#include <asm-generic/fixmap.h> + +#endif diff --git a/arch/loongarch/include/asm/fpregdef.h b/arch/loongarch/include/asm/fpregdef.h new file mode 100644 index 0000000000..e56610ae85 --- /dev/null +++ b/arch/loongarch/include/asm/fpregdef.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for the FPU register names + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FPREGDEF_H +#define _ASM_FPREGDEF_H + +#define fa0 $f0 /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */ +#define fa1 $f1 +#define fa2 $f2 +#define fa3 $f3 +#define fa4 $f4 +#define fa5 $f5 +#define fa6 $f6 +#define fa7 $f7 +#define ft0 $f8 /* caller saved */ +#define ft1 $f9 +#define ft2 $f10 +#define ft3 $f11 +#define ft4 $f12 +#define ft5 $f13 +#define ft6 $f14 +#define ft7 $f15 +#define ft8 $f16 +#define ft9 $f17 +#define ft10 $f18 +#define ft11 $f19 +#define ft12 $f20 +#define ft13 $f21 +#define ft14 $f22 +#define ft15 $f23 +#define fs0 $f24 /* callee saved */ +#define fs1 $f25 +#define fs2 
$f26 +#define fs3 $f27 +#define fs4 $f28 +#define fs5 $f29 +#define fs6 $f30 +#define fs7 $f31 + +#ifndef CONFIG_AS_HAS_FCSR_CLASS +/* + * Current binutils expects *GPRs* at FCSR position for the FCSR + * operation instructions, so define aliases for those used. + */ +#define fcsr0 $r0 +#define fcsr1 $r1 +#define fcsr2 $r2 +#define fcsr3 $r3 +#else +#define fcsr0 $fcsr0 +#define fcsr1 $fcsr1 +#define fcsr2 $fcsr2 +#define fcsr3 $fcsr3 +#endif + +#endif /* _ASM_FPREGDEF_H */ diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h new file mode 100644 index 0000000000..c2d8962fda --- /dev/null +++ b/arch/loongarch/include/asm/fpu.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FPU_H +#define _ASM_FPU_H + +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/ptrace.h> +#include <linux/thread_info.h> +#include <linux/bitops.h> + +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/current.h> +#include <asm/loongarch.h> +#include <asm/processor.h> +#include <asm/ptrace.h> + +struct sigcontext; + +extern void kernel_fpu_begin(void); +extern void kernel_fpu_end(void); + +extern void _init_fpu(unsigned int); +extern void _save_fp(struct loongarch_fpu *); +extern void _restore_fp(struct loongarch_fpu *); + +extern void _save_lsx(struct loongarch_fpu *fpu); +extern void _restore_lsx(struct loongarch_fpu *fpu); +extern void _init_lsx_upper(void); +extern void _restore_lsx_upper(struct loongarch_fpu *fpu); + +extern void _save_lasx(struct loongarch_fpu *fpu); +extern void _restore_lasx(struct loongarch_fpu *fpu); +extern void _init_lasx_upper(void); +extern void _restore_lasx_upper(struct loongarch_fpu *fpu); + +static inline void enable_lsx(void); +static inline void disable_lsx(void); +static inline void save_lsx(struct task_struct *t); +static inline void restore_lsx(struct task_struct *t); + +static inline void enable_lasx(void); +static inline void disable_lasx(void); +static inline void save_lasx(struct task_struct *t); +static inline void restore_lasx(struct task_struct *t); + +/* + * Mask the FCSR Cause bits according to the Enable bits, observing + * that Unimplemented is always enabled. + */ +static inline unsigned long mask_fcsr_x(unsigned long fcsr) +{ + return fcsr & ((fcsr & FPU_CSR_ALL_E) << + (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))); +} + +static inline int is_fp_enabled(void) +{ + return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ? + 1 : 0; +} + +static inline int is_lsx_enabled(void) +{ + if (!cpu_has_lsx) + return 0; + + return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LSXEN) ? + 1 : 0; +} + +static inline int is_lasx_enabled(void) +{ + if (!cpu_has_lasx) + return 0; + + return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LASXEN) ? 
+ 1 : 0; +} + +static inline int is_simd_enabled(void) +{ + return is_lsx_enabled() | is_lasx_enabled(); +} + +#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN) + +#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN) + +#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) + +static inline int is_fpu_owner(void) +{ + return test_thread_flag(TIF_USEDFPU); +} + +static inline void __own_fpu(void) +{ + enable_fpu(); + set_thread_flag(TIF_USEDFPU); + KSTK_EUEN(current) |= CSR_EUEN_FPEN; +} + +static inline void own_fpu_inatomic(int restore) +{ + if (cpu_has_fpu && !is_fpu_owner()) { + __own_fpu(); + if (restore) + _restore_fp(¤t->thread.fpu); + } +} + +static inline void own_fpu(int restore) +{ + preempt_disable(); + own_fpu_inatomic(restore); + preempt_enable(); +} + +static inline void lose_fpu_inatomic(int save, struct task_struct *tsk) +{ + if (is_fpu_owner()) { + if (!is_simd_enabled()) { + if (save) + _save_fp(&tsk->thread.fpu); + disable_fpu(); + } else { + if (save) { + if (!is_lasx_enabled()) + save_lsx(tsk); + else + save_lasx(tsk); + } + disable_fpu(); + disable_lsx(); + disable_lasx(); + clear_tsk_thread_flag(tsk, TIF_USEDSIMD); + } + clear_tsk_thread_flag(tsk, TIF_USEDFPU); + } + KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); +} + +static inline void lose_fpu(int save) +{ + preempt_disable(); + lose_fpu_inatomic(save, current); + preempt_enable(); +} + +static inline void init_fpu(void) +{ + unsigned int fcsr = current->thread.fpu.fcsr; + + __own_fpu(); + _init_fpu(fcsr); + set_used_math(); +} + +static inline void save_fp(struct task_struct *tsk) +{ + if (cpu_has_fpu) + _save_fp(&tsk->thread.fpu); +} + +static inline void restore_fp(struct task_struct *tsk) +{ + if (cpu_has_fpu) + _restore_fp(&tsk->thread.fpu); +} + +static inline void save_fpu_regs(struct task_struct *tsk) +{ + unsigned int euen; + + if (tsk == current) { + preempt_disable(); + + euen = csr_read32(LOONGARCH_CSR_EUEN); + +#ifdef CONFIG_CPU_HAS_LASX + if (euen & CSR_EUEN_LASXEN) + _save_lasx(¤t->thread.fpu); + else +#endif +#ifdef CONFIG_CPU_HAS_LSX + if (euen & CSR_EUEN_LSXEN) + _save_lsx(¤t->thread.fpu); + else +#endif + if (euen & CSR_EUEN_FPEN) + _save_fp(¤t->thread.fpu); + + preempt_enable(); + } +} + +static inline int is_simd_owner(void) +{ + return test_thread_flag(TIF_USEDSIMD); +} + +#ifdef CONFIG_CPU_HAS_LSX + +static inline void enable_lsx(void) +{ + if (cpu_has_lsx) + csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +} + +static inline void disable_lsx(void) +{ + if (cpu_has_lsx) + csr_xchg32(0, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +} + +static inline void save_lsx(struct task_struct *t) +{ + if (cpu_has_lsx) + _save_lsx(&t->thread.fpu); +} + +static inline void restore_lsx(struct task_struct *t) +{ + if (cpu_has_lsx) + _restore_lsx(&t->thread.fpu); +} + +static inline void init_lsx_upper(void) +{ + if (cpu_has_lsx) + _init_lsx_upper(); +} + +static inline void restore_lsx_upper(struct task_struct *t) +{ + if (cpu_has_lsx) + _restore_lsx_upper(&t->thread.fpu); +} + +#else +static inline void enable_lsx(void) {} +static inline void disable_lsx(void) {} +static inline void save_lsx(struct task_struct *t) {} +static inline void restore_lsx(struct task_struct *t) {} +static inline void init_lsx_upper(void) {} +static inline void restore_lsx_upper(struct task_struct *t) {} +#endif + +#ifdef CONFIG_CPU_HAS_LASX + +static inline void enable_lasx(void) +{ + + if (cpu_has_lasx) + csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +} + +static inline void 
disable_lasx(void) +{ + if (cpu_has_lasx) + csr_xchg32(0, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +} + +static inline void save_lasx(struct task_struct *t) +{ + if (cpu_has_lasx) + _save_lasx(&t->thread.fpu); +} + +static inline void restore_lasx(struct task_struct *t) +{ + if (cpu_has_lasx) + _restore_lasx(&t->thread.fpu); +} + +static inline void init_lasx_upper(void) +{ + if (cpu_has_lasx) + _init_lasx_upper(); +} + +static inline void restore_lasx_upper(struct task_struct *t) +{ + if (cpu_has_lasx) + _restore_lasx_upper(&t->thread.fpu); +} + +#else +static inline void enable_lasx(void) {} +static inline void disable_lasx(void) {} +static inline void save_lasx(struct task_struct *t) {} +static inline void restore_lasx(struct task_struct *t) {} +static inline void init_lasx_upper(void) {} +static inline void restore_lasx_upper(struct task_struct *t) {} +#endif + +static inline int thread_lsx_context_live(void) +{ + if (!cpu_has_lsx) + return 0; + + return test_thread_flag(TIF_LSX_CTX_LIVE); +} + +static inline int thread_lasx_context_live(void) +{ + if (!cpu_has_lasx) + return 0; + + return test_thread_flag(TIF_LASX_CTX_LIVE); +} + +#endif /* _ASM_FPU_H */ diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h new file mode 100644 index 0000000000..a11996eb58 --- /dev/null +++ b/arch/loongarch/include/asm/ftrace.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_FTRACE_H +#define _ASM_LOONGARCH_FTRACE_H + +#define FTRACE_PLT_IDX 0 +#define FTRACE_REGS_PLT_IDX 1 +#define NR_FTRACE_PLTS 2 + +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ + +#ifndef CONFIG_DYNAMIC_FTRACE + +#define mcount _mcount +extern void _mcount(void); +extern void prepare_ftrace_return(unsigned long self_addr, unsigned long callsite_sp, unsigned long old); + +#else + +struct dyn_ftrace; +struct dyn_arch_ftrace { }; + +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#define ftrace_init_nop ftrace_init_nop +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent); + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +struct ftrace_ops; + +struct ftrace_regs { + struct pt_regs regs; +}; + +static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) +{ + return &fregs->regs; +} + +static __always_inline unsigned long +ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs) +{ + return instruction_pointer(&fregs->regs); +} + +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) +{ + regs_set_return_value(&fregs->regs, ip); +} + +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&(fregs)->regs) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(&(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + +#define ftrace_graph_func 
ftrace_graph_func +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +static inline void +__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +{ + regs->regs[13] = addr; /* t1 */ +} + +#define arch_ftrace_set_direct_caller(fregs, addr) \ + __arch_ftrace_set_direct_caller(&(fregs)->regs, addr) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ + +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +struct fgraph_ret_regs { + /* a0 - a1 */ + unsigned long regs[2]; + + unsigned long fp; + unsigned long __unused; +}; + +static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs) +{ + return ret_regs->regs[0]; +} + +static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs) +{ + return ret_regs->fp; +} +#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */ +#endif + +#endif /* _ASM_LOONGARCH_FTRACE_H */ diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h new file mode 100644 index 0000000000..042ca4448e --- /dev/null +++ b/arch/loongarch/include/asm/futex.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/asm-extable.h> +#include <asm/barrier.h> +#include <asm/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + __asm__ __volatile__( \ + "1: ll.w %1, %4 # __futex_atomic_op\n" \ + " " insn " \n" \ + "2: sc.w $t0, %2 \n" \ + " beqz $t0, 1b \n" \ + "3: \n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ + : "=r" (ret), "=&r" (oldval), \ + "=ZC" (*uaddr) \ + : "0" (0), "ZC" (*uaddr), "Jr" (oparg) \ + : "memory", "t0"); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) +{ + int ret = 0; + u32 val = 0; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__( + "# futex_atomic_cmpxchg_inatomic \n" + "1: ll.w %1, %3 \n" + " bne %1, %z4, 3f \n" + " move $t0, %z5 \n" + "2: sc.w $t0, %2 \n" + " beqz $t0, 1b \n" + "3: \n" + __WEAK_LLSC_MB + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) + : "+r" (ret), "=&r" (val), "=ZC" (*uaddr) + : "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval) + : "memory", "t0"); + + *uval = val; + + return ret; +} + +#endif /* _ASM_FUTEX_H */ diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h new file mode 
100644 index 0000000000..996038da80 --- /dev/null +++ b/arch/loongarch/include/asm/gpr-num.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GPR_NUM_H +#define __ASM_GPR_NUM_H + +#ifdef __ASSEMBLY__ + + .equ .L__gpr_num_zero, 0 + .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .equ .L__gpr_num_$r\num, \num + .endr + + /* ABI names of registers */ + .equ .L__gpr_num_$ra, 1 + .equ .L__gpr_num_$tp, 2 + .equ .L__gpr_num_$sp, 3 + .irp num,0,1,2,3,4,5,6,7 + .equ .L__gpr_num_$a\num, 4 + \num + .endr + .irp num,0,1,2,3,4,5,6,7,8 + .equ .L__gpr_num_$t\num, 12 + \num + .endr + .equ .L__gpr_num_$s9, 22 + .equ .L__gpr_num_$fp, 22 + .irp num,0,1,2,3,4,5,6,7,8 + .equ .L__gpr_num_$s\num, 23 + \num + .endr + +#else /* __ASSEMBLY__ */ + +#define __DEFINE_ASM_GPR_NUMS \ +" .equ .L__gpr_num_zero, 0\n" \ +" .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ +" .equ .L__gpr_num_$r\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__gpr_num_$ra, 1\n" \ +" .equ .L__gpr_num_$tp, 2\n" \ +" .equ .L__gpr_num_$sp, 3\n" \ +" .irp num,0,1,2,3,4,5,6,7\n" \ +" .equ .L__gpr_num_$a\\num, 4 + \\num\n" \ +" .endr\n" \ +" .irp num,0,1,2,3,4,5,6,7,8\n" \ +" .equ .L__gpr_num_$t\\num, 12 + \\num\n" \ +" .endr\n" \ +" .equ .L__gpr_num_$s9, 22\n" \ +" .equ .L__gpr_num_$fp, 22\n" \ +" .irp num,0,1,2,3,4,5,6,7,8\n" \ +" .equ .L__gpr_num_$s\\num, 23 + \\num\n" \ +" .endr\n" \ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GPR_NUM_H */ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h new file mode 100644 index 0000000000..0ef3b18f89 --- /dev/null +++ b/arch/loongarch/include/asm/hardirq.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_HARDIRQ_H +#define _ASM_HARDIRQ_H + +#include <linux/cache.h> +#include <linux/threads.h> +#include <linux/irq.h> + +extern void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#define NR_IPI 2 + +typedef struct { + unsigned int ipi_irqs[NR_IPI]; + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define __ARCH_IRQ_STAT + +#endif /* _ASM_HARDIRQ_H */ diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h new file mode 100644 index 0000000000..aa44b3fe43 --- /dev/null +++ b/arch/loongarch/include/asm/hugetlb.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_HUGETLB_H +#define __ASM_HUGETLB_H + +#include <asm/page.h> + +uint64_t pmd_to_entrylo(unsigned long pmd_val); + +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, + unsigned long len) +{ + unsigned long task_size = STACK_TOP; + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + if (task_size - len < addr) + return -EINVAL; + return 0; +} + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t clear; + pte_t pte = *ptep; + + pte_val(clear) = (unsigned long)invalid_pte_table; + set_pte_at(mm, addr, ptep, clear); + return pte; +} + +#define 
__HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t pte; + + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + flush_tlb_page(vma, addr); + return pte; +} + +#define __HAVE_ARCH_HUGE_PTE_NONE +static inline int huge_pte_none(pte_t pte) +{ + unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL; + return !val || (val == (unsigned long)invalid_pte_table); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, + pte_t *ptep, pte_t pte, + int dirty) +{ + int changed = !pte_same(*ptep, pte); + + if (changed) { + set_pte_at(vma->vm_mm, addr, ptep, pte); + /* + * There could be some standard sized pages in there, + * get them all. + */ + flush_tlb_range(vma, addr, addr + HPAGE_SIZE); + } + return changed; +} + +#include <asm-generic/hugetlb.h> + +#endif /* __ASM_HUGETLB_H */ diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h new file mode 100644 index 0000000000..21447fb1ef --- /dev/null +++ b/arch/loongarch/include/asm/hw_breakpoint.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022-2023 Loongson Technology Corporation Limited + */ +#ifndef __ASM_HW_BREAKPOINT_H +#define __ASM_HW_BREAKPOINT_H + +#include <asm/loongarch.h> + +#ifdef __KERNEL__ + +/* Breakpoint */ +#define LOONGARCH_BREAKPOINT_EXECUTE (0 << 0) + +/* Watchpoints */ +#define LOONGARCH_BREAKPOINT_LOAD (1 << 0) +#define LOONGARCH_BREAKPOINT_STORE (1 << 1) + +struct arch_hw_breakpoint_ctrl { + u32 __reserved : 28, + len : 2, + type : 2; +}; + +struct arch_hw_breakpoint { + u64 address; + u64 mask; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +/* Lengths */ +#define LOONGARCH_BREAKPOINT_LEN_1 0b11 +#define LOONGARCH_BREAKPOINT_LEN_2 0b10 +#define LOONGARCH_BREAKPOINT_LEN_4 0b01 +#define LOONGARCH_BREAKPOINT_LEN_8 0b00 + +/* + * Limits. + * Changing these will require modifications to the register accessors. + */ +#define LOONGARCH_MAX_BRP 8 +#define LOONGARCH_MAX_WRP 8 + +/* Virtual debug register bases. */ +#define CSR_CFG_ADDR 0 +#define CSR_CFG_MASK (CSR_CFG_ADDR + LOONGARCH_MAX_BRP) +#define CSR_CFG_CTRL (CSR_CFG_MASK + LOONGARCH_MAX_BRP) +#define CSR_CFG_ASID (CSR_CFG_CTRL + LOONGARCH_MAX_WRP) + +/* Debug register names. */ +#define LOONGARCH_CSR_NAME_ADDR ADDR +#define LOONGARCH_CSR_NAME_MASK MASK +#define LOONGARCH_CSR_NAME_CTRL CTRL +#define LOONGARCH_CSR_NAME_ASID ASID + +/* Accessor macros for the debug registers. 
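+ * For example, LOONGARCH_CSR_WATCH_READ(0, ADDR, 0, val) token-pastes into csr_read64(LOONGARCH_CSR_IB0ADDR), the address register of instruction breakpoint 0, while a non-zero T selects the corresponding LOONGARCH_CSR_DB0ADDR data-watchpoint register instead.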
*/ +#define LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL) \ +do { \ + if (T == 0) \ + VAL = csr_read64(LOONGARCH_CSR_##IB##N##REG); \ + else \ + VAL = csr_read64(LOONGARCH_CSR_##DB##N##REG); \ +} while (0) + +#define LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL) \ +do { \ + if (T == 0) \ + csr_write64(VAL, LOONGARCH_CSR_##IB##N##REG); \ + else \ + csr_write64(VAL, LOONGARCH_CSR_##DB##N##REG); \ +} while (0) + +/* Exact number */ +#define CSR_FWPC_NUM 0x3f +#define CSR_MWPC_NUM 0x3f + +#define CTRL_PLV_ENABLE 0x1e + +#define MWPnCFG3_LoadEn 8 +#define MWPnCFG3_StoreEn 9 + +#define MWPnCFG3_Type_mask 0x3 +#define MWPnCFG3_Size_mask 0x3 + +static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) +{ + return (ctrl.len << 10) | (ctrl.type << 8); +} + +static inline void decode_ctrl_reg(u32 reg, struct arch_hw_breakpoint_ctrl *ctrl) +{ + reg >>= 8; + ctrl->type = reg & MWPnCFG3_Type_mask; + reg >>= 2; + ctrl->len = reg & MWPnCFG3_Size_mask; +} + +struct task_struct; +struct notifier_block; +struct perf_event; +struct perf_event_attr; + +extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type, int *offset); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); + +extern int arch_install_hw_breakpoint(struct perf_event *bp); +extern void arch_uninstall_hw_breakpoint(struct perf_event *bp); +extern int hw_breakpoint_slots(int type); +extern void hw_breakpoint_pmu_read(struct perf_event *bp); + +void breakpoint_handler(struct pt_regs *regs); +void watchpoint_handler(struct pt_regs *regs); + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +extern void ptrace_hw_copy_thread(struct task_struct *task); +extern void hw_breakpoint_thread_switch(struct task_struct *next); +#else +static inline void ptrace_hw_copy_thread(struct task_struct *task) +{ +} +static inline void hw_breakpoint_thread_switch(struct task_struct *next) +{ +} +#endif + +/* Determine number of BRP registers available. */ +static inline int get_num_brps(void) +{ + return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM; +} + +/* Determine number of WRP registers available. */ +static inline int get_num_wrps(void) +{ + return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM; +} + +#endif /* __KERNEL__ */ +#endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h new file mode 100644 index 0000000000..af4f4e8fbd --- /dev/null +++ b/arch/loongarch/include/asm/hw_irq.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_HW_IRQ_H +#define __ASM_HW_IRQ_H + +#include <linux/atomic.h> + +extern atomic_t irq_err_count; + +/* + * interrupt-retrigger: NOP for now. This may not be appropriate for all + * machines, we'll see ... 
+ */ + +#endif /* __ASM_HW_IRQ_H */ diff --git a/arch/loongarch/include/asm/idle.h b/arch/loongarch/include/asm/idle.h new file mode 100644 index 0000000000..f7f2b7dbf9 --- /dev/null +++ b/arch/loongarch/include/asm/idle.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_IDLE_H +#define __ASM_IDLE_H + +#include <linux/linkage.h> + +extern asmlinkage void __arch_cpu_idle(void); + +#endif /* __ASM_IDLE_H */ diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h new file mode 100644 index 0000000000..71e1ed4165 --- /dev/null +++ b/arch/loongarch/include/asm/inst.h @@ -0,0 +1,743 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_INST_H +#define _ASM_INST_H + +#include <linux/bitops.h> +#include <linux/types.h> +#include <asm/asm.h> +#include <asm/ptrace.h> + +#define INSN_NOP 0x03400000 +#define INSN_BREAK 0x002a0000 + +#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 +#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 +#define ADDR_IMMMASK_LU12IW 0x00000000FFFFF000 +#define ADDR_IMMMASK_ORI 0x0000000000000FFF +#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000 + +#define ADDR_IMMSHIFT_LU52ID 52 +#define ADDR_IMMSBIDX_LU52ID 11 +#define ADDR_IMMSHIFT_LU32ID 32 +#define ADDR_IMMSBIDX_LU32ID 19 +#define ADDR_IMMSHIFT_LU12IW 12 +#define ADDR_IMMSBIDX_LU12IW 19 +#define ADDR_IMMSHIFT_ORI 0 +#define ADDR_IMMSBIDX_ORI 63 +#define ADDR_IMMSHIFT_ADDU16ID 16 +#define ADDR_IMMSBIDX_ADDU16ID 15 + +#define ADDR_IMM(addr, INSN) \ + (sign_extend64(((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN), ADDR_IMMSBIDX_##INSN)) + +enum reg0i15_op { + break_op = 0x54, +}; + +enum reg0i26_op { + b_op = 0x14, + bl_op = 0x15, +}; + +enum reg1i20_op { + lu12iw_op = 0x0a, + lu32id_op = 0x0b, + pcaddi_op = 0x0c, + pcalau12i_op = 0x0d, + pcaddu12i_op = 0x0e, + pcaddu18i_op = 0x0f, +}; + +enum reg1i21_op { + beqz_op = 0x10, + bnez_op = 0x11, + bceqz_op = 0x12, /* bits[9:8] = 0x00 */ + bcnez_op = 0x12, /* bits[9:8] = 0x01 */ +}; + +enum reg2_op { + revb2h_op = 0x0c, + revb4h_op = 0x0d, + revb2w_op = 0x0e, + revbd_op = 0x0f, + revh2w_op = 0x10, + revhd_op = 0x11, +}; + +enum reg2i5_op { + slliw_op = 0x81, + srliw_op = 0x89, + sraiw_op = 0x91, +}; + +enum reg2i6_op { + sllid_op = 0x41, + srlid_op = 0x45, + sraid_op = 0x49, +}; + +enum reg2i12_op { + addiw_op = 0x0a, + addid_op = 0x0b, + lu52id_op = 0x0c, + andi_op = 0x0d, + ori_op = 0x0e, + xori_op = 0x0f, + ldb_op = 0xa0, + ldh_op = 0xa1, + ldw_op = 0xa2, + ldd_op = 0xa3, + stb_op = 0xa4, + sth_op = 0xa5, + stw_op = 0xa6, + std_op = 0xa7, + ldbu_op = 0xa8, + ldhu_op = 0xa9, + ldwu_op = 0xaa, + flds_op = 0xac, + fsts_op = 0xad, + fldd_op = 0xae, + fstd_op = 0xaf, +}; + +enum reg2i14_op { + llw_op = 0x20, + scw_op = 0x21, + lld_op = 0x22, + scd_op = 0x23, + ldptrw_op = 0x24, + stptrw_op = 0x25, + ldptrd_op = 0x26, + stptrd_op = 0x27, +}; + +enum reg2i16_op { + jirl_op = 0x13, + beq_op = 0x16, + bne_op = 0x17, + blt_op = 0x18, + bge_op = 0x19, + bltu_op = 0x1a, + bgeu_op = 0x1b, +}; + +enum reg2bstrd_op { + bstrinsd_op = 0x2, + bstrpickd_op = 0x3, +}; + +enum reg3_op { + asrtle_op = 0x02, + asrtgt_op = 0x03, + addw_op = 0x20, + addd_op = 0x21, + subw_op = 0x22, + subd_op = 0x23, + nor_op = 0x28, + and_op = 0x29, + or_op = 0x2a, + xor_op = 0x2b, + orn_op = 0x2c, + andn_op = 0x2d, + sllw_op = 0x2e, + srlw_op = 0x2f, + sraw_op = 0x30, + slld_op = 0x31, + srld_op = 0x32, + srad_op = 0x33, + mulw_op = 0x38, + mulhw_op = 0x39, + mulhwu_op = 0x3a, + muld_op 
= 0x3b, + mulhd_op = 0x3c, + mulhdu_op = 0x3d, + divw_op = 0x40, + modw_op = 0x41, + divwu_op = 0x42, + modwu_op = 0x43, + divd_op = 0x44, + modd_op = 0x45, + divdu_op = 0x46, + moddu_op = 0x47, + ldxb_op = 0x7000, + ldxh_op = 0x7008, + ldxw_op = 0x7010, + ldxd_op = 0x7018, + stxb_op = 0x7020, + stxh_op = 0x7028, + stxw_op = 0x7030, + stxd_op = 0x7038, + ldxbu_op = 0x7040, + ldxhu_op = 0x7048, + ldxwu_op = 0x7050, + fldxs_op = 0x7060, + fldxd_op = 0x7068, + fstxs_op = 0x7070, + fstxd_op = 0x7078, + amswapw_op = 0x70c0, + amswapd_op = 0x70c1, + amaddw_op = 0x70c2, + amaddd_op = 0x70c3, + amandw_op = 0x70c4, + amandd_op = 0x70c5, + amorw_op = 0x70c6, + amord_op = 0x70c7, + amxorw_op = 0x70c8, + amxord_op = 0x70c9, + ammaxw_op = 0x70ca, + ammaxd_op = 0x70cb, + amminw_op = 0x70cc, + ammind_op = 0x70cd, + ammaxwu_op = 0x70ce, + ammaxdu_op = 0x70cf, + amminwu_op = 0x70d0, + ammindu_op = 0x70d1, + amswapdbw_op = 0x70d2, + amswapdbd_op = 0x70d3, + amadddbw_op = 0x70d4, + amadddbd_op = 0x70d5, + amanddbw_op = 0x70d6, + amanddbd_op = 0x70d7, + amordbw_op = 0x70d8, + amordbd_op = 0x70d9, + amxordbw_op = 0x70da, + amxordbd_op = 0x70db, + ammaxdbw_op = 0x70dc, + ammaxdbd_op = 0x70dd, + ammindbw_op = 0x70de, + ammindbd_op = 0x70df, + ammaxdbwu_op = 0x70e0, + ammaxdbdu_op = 0x70e1, + ammindbwu_op = 0x70e2, + ammindbdu_op = 0x70e3, + fldgts_op = 0x70e8, + fldgtd_op = 0x70e9, + fldles_op = 0x70ea, + fldled_op = 0x70eb, + fstgts_op = 0x70ec, + fstgtd_op = 0x70ed, + fstles_op = 0x70ee, + fstled_op = 0x70ef, + ldgtb_op = 0x70f0, + ldgth_op = 0x70f1, + ldgtw_op = 0x70f2, + ldgtd_op = 0x70f3, + ldleb_op = 0x70f4, + ldleh_op = 0x70f5, + ldlew_op = 0x70f6, + ldled_op = 0x70f7, + stgtb_op = 0x70f8, + stgth_op = 0x70f9, + stgtw_op = 0x70fa, + stgtd_op = 0x70fb, + stleb_op = 0x70fc, + stleh_op = 0x70fd, + stlew_op = 0x70fe, + stled_op = 0x70ff, +}; + +enum reg3sa2_op { + alslw_op = 0x02, + alslwu_op = 0x03, + alsld_op = 0x16, +}; + +struct reg0i15_format { + unsigned int immediate : 15; + unsigned int opcode : 17; +}; + +struct reg0i26_format { + unsigned int immediate_h : 10; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg1i20_format { + unsigned int rd : 5; + unsigned int immediate : 20; + unsigned int opcode : 7; +}; + +struct reg1i21_format { + unsigned int immediate_h : 5; + unsigned int rj : 5; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int opcode : 22; +}; + +struct reg2i5_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 5; + unsigned int opcode : 17; +}; + +struct reg2i6_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 6; + unsigned int opcode : 16; +}; + +struct reg2i12_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 12; + unsigned int opcode : 10; +}; + +struct reg2i14_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 14; + unsigned int opcode : 8; +}; + +struct reg2i16_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 16; + unsigned int opcode : 6; +}; + +struct reg2bstrd_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int lsbd : 6; + unsigned int msbd : 6; + unsigned int opcode : 10; +}; + +struct reg3_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int opcode : 17; +}; + +struct reg3sa2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int 
rk : 5; + unsigned int immediate : 2; + unsigned int opcode : 15; +}; + +union loongarch_instruction { + unsigned int word; + struct reg0i15_format reg0i15_format; + struct reg0i26_format reg0i26_format; + struct reg1i20_format reg1i20_format; + struct reg1i21_format reg1i21_format; + struct reg2_format reg2_format; + struct reg2i5_format reg2i5_format; + struct reg2i6_format reg2i6_format; + struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; + struct reg2i16_format reg2i16_format; + struct reg2bstrd_format reg2bstrd_format; + struct reg3_format reg3_format; + struct reg3sa2_format reg3sa2_format; +}; + +#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) + +enum loongarch_gpr { + LOONGARCH_GPR_ZERO = 0, + LOONGARCH_GPR_RA = 1, + LOONGARCH_GPR_TP = 2, + LOONGARCH_GPR_SP = 3, + LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */ + LOONGARCH_GPR_A1, /* Reused as V1 for return value */ + LOONGARCH_GPR_A2, + LOONGARCH_GPR_A3, + LOONGARCH_GPR_A4, + LOONGARCH_GPR_A5, + LOONGARCH_GPR_A6, + LOONGARCH_GPR_A7, + LOONGARCH_GPR_T0 = 12, + LOONGARCH_GPR_T1, + LOONGARCH_GPR_T2, + LOONGARCH_GPR_T3, + LOONGARCH_GPR_T4, + LOONGARCH_GPR_T5, + LOONGARCH_GPR_T6, + LOONGARCH_GPR_T7, + LOONGARCH_GPR_T8, + LOONGARCH_GPR_FP = 22, + LOONGARCH_GPR_S0 = 23, + LOONGARCH_GPR_S1, + LOONGARCH_GPR_S2, + LOONGARCH_GPR_S3, + LOONGARCH_GPR_S4, + LOONGARCH_GPR_S5, + LOONGARCH_GPR_S6, + LOONGARCH_GPR_S7, + LOONGARCH_GPR_S8, + LOONGARCH_GPR_MAX +}; + +#define is_imm12_negative(val) is_imm_negative(val, 12) + +static inline bool is_imm_negative(unsigned long val, unsigned int bit) +{ + return val & (1UL << (bit - 1)); +} + +static inline bool is_break_ins(union loongarch_instruction *ip) +{ + return ip->reg0i15_format.opcode == break_op; +} + +static inline bool is_pc_ins(union loongarch_instruction *ip) +{ + return ip->reg1i20_format.opcode >= pcaddi_op && + ip->reg1i20_format.opcode <= pcaddu18i_op; +} + +static inline bool is_branch_ins(union loongarch_instruction *ip) +{ + return ip->reg1i21_format.opcode >= beqz_op && + ip->reg1i21_format.opcode <= bgeu_op; +} + +static inline bool is_ra_save_ins(union loongarch_instruction *ip) +{ + /* st.d $ra, $sp, offset */ + return ip->reg2i12_format.opcode == std_op && + ip->reg2i12_format.rj == LOONGARCH_GPR_SP && + ip->reg2i12_format.rd == LOONGARCH_GPR_RA && + !is_imm12_negative(ip->reg2i12_format.immediate); +} + +static inline bool is_stack_alloc_ins(union loongarch_instruction *ip) +{ + /* addi.d $sp, $sp, -imm */ + return ip->reg2i12_format.opcode == addid_op && + ip->reg2i12_format.rj == LOONGARCH_GPR_SP && + ip->reg2i12_format.rd == LOONGARCH_GPR_SP && + is_imm12_negative(ip->reg2i12_format.immediate); +} + +static inline bool is_self_loop_ins(union loongarch_instruction *ip, struct pt_regs *regs) +{ + switch (ip->reg0i26_format.opcode) { + case b_op: + case bl_op: + if (ip->reg0i26_format.immediate_l == 0 + && ip->reg0i26_format.immediate_h == 0) + return true; + } + + switch (ip->reg1i21_format.opcode) { + case beqz_op: + case bnez_op: + case bceqz_op: + if (ip->reg1i21_format.immediate_l == 0 + && ip->reg1i21_format.immediate_h == 0) + return true; + } + + switch (ip->reg2i16_format.opcode) { + case beq_op: + case bne_op: + case blt_op: + case bge_op: + case bltu_op: + case bgeu_op: + if (ip->reg2i16_format.immediate == 0) + return true; + break; + case jirl_op: + if (regs->regs[ip->reg2i16_format.rj] + + ((unsigned long)ip->reg2i16_format.immediate << 2) == (unsigned long)ip) + return true; + } + + return false; +} + +void 
simu_pc(struct pt_regs *regs, union loongarch_instruction insn); +void simu_branch(struct pt_regs *regs, union loongarch_instruction insn); + +bool insns_not_supported(union loongarch_instruction insn); +bool insns_need_simulation(union loongarch_instruction insn); +void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs); + +int larch_insn_read(void *addr, u32 *insnp); +int larch_insn_write(void *addr, u32 insn); +int larch_insn_patch_text(void *addr, u32 insn); + +u32 larch_insn_gen_nop(void); +u32 larch_insn_gen_b(unsigned long pc, unsigned long dest); +u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest); + +u32 larch_insn_gen_break(int imm); + +u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk); +u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj); + +u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm); +u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); +u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); + +static inline bool signed_imm_check(long val, unsigned int bit) +{ + return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1)); +} + +static inline bool unsigned_imm_check(unsigned long val, unsigned int bit) +{ + return val < (1UL << bit); +} + +#define DEF_EMIT_REG0I15_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + int imm) \ +{ \ + insn->reg0i15_format.opcode = OP; \ + insn->reg0i15_format.immediate = imm; \ +} + +DEF_EMIT_REG0I15_FORMAT(break, break_op) + +#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + int offset) \ +{ \ + unsigned int immediate_l, immediate_h; \ + \ + immediate_l = offset & 0xffff; \ + offset >>= 16; \ + immediate_h = offset & 0x3ff; \ + \ + insn->reg0i26_format.opcode = OP; \ + insn->reg0i26_format.immediate_l = immediate_l; \ + insn->reg0i26_format.immediate_h = immediate_h; \ +} + +DEF_EMIT_REG0I26_FORMAT(b, b_op) +DEF_EMIT_REG0I26_FORMAT(bl, bl_op) + +#define DEF_EMIT_REG1I20_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, int imm) \ +{ \ + insn->reg1i20_format.opcode = OP; \ + insn->reg1i20_format.immediate = imm; \ + insn->reg1i20_format.rd = rd; \ +} + +DEF_EMIT_REG1I20_FORMAT(lu12iw, lu12iw_op) +DEF_EMIT_REG1I20_FORMAT(lu32id, lu32id_op) +DEF_EMIT_REG1I20_FORMAT(pcaddu18i, pcaddu18i_op) + +#define DEF_EMIT_REG2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj) \ +{ \ + insn->reg2_format.opcode = OP; \ + insn->reg2_format.rd = rd; \ + insn->reg2_format.rj = rj; \ +} + +DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op) +DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op) +DEF_EMIT_REG2_FORMAT(revbd, revbd_op) + +#define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i5_format.opcode = OP; \ + insn->reg2i5_format.immediate = imm; \ + insn->reg2i5_format.rd = rd; \ + insn->reg2i5_format.rj = rj; \ +} + +DEF_EMIT_REG2I5_FORMAT(slliw, slliw_op) +DEF_EMIT_REG2I5_FORMAT(srliw, srliw_op) +DEF_EMIT_REG2I5_FORMAT(sraiw, sraiw_op) + +#define DEF_EMIT_REG2I6_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + 
enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i6_format.opcode = OP; \ + insn->reg2i6_format.immediate = imm; \ + insn->reg2i6_format.rd = rd; \ + insn->reg2i6_format.rj = rj; \ +} + +DEF_EMIT_REG2I6_FORMAT(sllid, sllid_op) +DEF_EMIT_REG2I6_FORMAT(srlid, srlid_op) +DEF_EMIT_REG2I6_FORMAT(sraid, sraid_op) + +#define DEF_EMIT_REG2I12_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i12_format.opcode = OP; \ + insn->reg2i12_format.immediate = imm; \ + insn->reg2i12_format.rd = rd; \ + insn->reg2i12_format.rj = rj; \ +} + +DEF_EMIT_REG2I12_FORMAT(addiw, addiw_op) +DEF_EMIT_REG2I12_FORMAT(addid, addid_op) +DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op) +DEF_EMIT_REG2I12_FORMAT(andi, andi_op) +DEF_EMIT_REG2I12_FORMAT(ori, ori_op) +DEF_EMIT_REG2I12_FORMAT(xori, xori_op) +DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op) +DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op) +DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op) +DEF_EMIT_REG2I12_FORMAT(ldd, ldd_op) +DEF_EMIT_REG2I12_FORMAT(stb, stb_op) +DEF_EMIT_REG2I12_FORMAT(sth, sth_op) +DEF_EMIT_REG2I12_FORMAT(stw, stw_op) +DEF_EMIT_REG2I12_FORMAT(std, std_op) + +#define DEF_EMIT_REG2I14_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i14_format.opcode = OP; \ + insn->reg2i14_format.immediate = imm; \ + insn->reg2i14_format.rd = rd; \ + insn->reg2i14_format.rj = rj; \ +} + +DEF_EMIT_REG2I14_FORMAT(llw, llw_op) +DEF_EMIT_REG2I14_FORMAT(scw, scw_op) +DEF_EMIT_REG2I14_FORMAT(lld, lld_op) +DEF_EMIT_REG2I14_FORMAT(scd, scd_op) +DEF_EMIT_REG2I14_FORMAT(ldptrw, ldptrw_op) +DEF_EMIT_REG2I14_FORMAT(stptrw, stptrw_op) +DEF_EMIT_REG2I14_FORMAT(ldptrd, ldptrd_op) +DEF_EMIT_REG2I14_FORMAT(stptrd, stptrd_op) + +#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rd, \ + int offset) \ +{ \ + insn->reg2i16_format.opcode = OP; \ + insn->reg2i16_format.immediate = offset; \ + insn->reg2i16_format.rj = rj; \ + insn->reg2i16_format.rd = rd; \ +} + +DEF_EMIT_REG2I16_FORMAT(beq, beq_op) +DEF_EMIT_REG2I16_FORMAT(bne, bne_op) +DEF_EMIT_REG2I16_FORMAT(blt, blt_op) +DEF_EMIT_REG2I16_FORMAT(bge, bge_op) +DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op) +DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op) +DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int msbd, \ + int lsbd) \ +{ \ + insn->reg2bstrd_format.opcode = OP; \ + insn->reg2bstrd_format.msbd = msbd; \ + insn->reg2bstrd_format.lsbd = lsbd; \ + insn->reg2bstrd_format.rj = rj; \ + insn->reg2bstrd_format.rd = rd; \ +} + +DEF_EMIT_REG2BSTRD_FORMAT(bstrpickd, bstrpickd_op) + +#define DEF_EMIT_REG3_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk) \ +{ \ + insn->reg3_format.opcode = OP; \ + insn->reg3_format.rd = rd; \ + insn->reg3_format.rj = rj; \ + insn->reg3_format.rk = rk; \ +} + +DEF_EMIT_REG3_FORMAT(addd, addd_op) +DEF_EMIT_REG3_FORMAT(subd, subd_op) +DEF_EMIT_REG3_FORMAT(muld, muld_op) +DEF_EMIT_REG3_FORMAT(divdu, divdu_op) +DEF_EMIT_REG3_FORMAT(moddu, moddu_op) +DEF_EMIT_REG3_FORMAT(and, and_op) +DEF_EMIT_REG3_FORMAT(or, or_op) 
+DEF_EMIT_REG3_FORMAT(xor, xor_op) +DEF_EMIT_REG3_FORMAT(sllw, sllw_op) +DEF_EMIT_REG3_FORMAT(slld, slld_op) +DEF_EMIT_REG3_FORMAT(srlw, srlw_op) +DEF_EMIT_REG3_FORMAT(srld, srld_op) +DEF_EMIT_REG3_FORMAT(sraw, sraw_op) +DEF_EMIT_REG3_FORMAT(srad, srad_op) +DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op) +DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op) +DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op) +DEF_EMIT_REG3_FORMAT(ldxd, ldxd_op) +DEF_EMIT_REG3_FORMAT(stxb, stxb_op) +DEF_EMIT_REG3_FORMAT(stxh, stxh_op) +DEF_EMIT_REG3_FORMAT(stxw, stxw_op) +DEF_EMIT_REG3_FORMAT(stxd, stxd_op) +DEF_EMIT_REG3_FORMAT(amaddw, amaddw_op) +DEF_EMIT_REG3_FORMAT(amaddd, amaddd_op) +DEF_EMIT_REG3_FORMAT(amandw, amandw_op) +DEF_EMIT_REG3_FORMAT(amandd, amandd_op) +DEF_EMIT_REG3_FORMAT(amorw, amorw_op) +DEF_EMIT_REG3_FORMAT(amord, amord_op) +DEF_EMIT_REG3_FORMAT(amxorw, amxorw_op) +DEF_EMIT_REG3_FORMAT(amxord, amxord_op) +DEF_EMIT_REG3_FORMAT(amswapw, amswapw_op) +DEF_EMIT_REG3_FORMAT(amswapd, amswapd_op) + +#define DEF_EMIT_REG3SA2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk, \ + int imm) \ +{ \ + insn->reg3sa2_format.opcode = OP; \ + insn->reg3sa2_format.immediate = imm; \ + insn->reg3sa2_format.rd = rd; \ + insn->reg3sa2_format.rj = rj; \ + insn->reg3sa2_format.rk = rk; \ +} + +DEF_EMIT_REG3SA2_FORMAT(alsld, alsld_op) + +struct pt_regs; + +void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc); +unsigned long unaligned_read(void __user *addr, void *value, unsigned long n, bool sign); +unsigned long unaligned_write(void __user *addr, unsigned long value, unsigned long n); + +#endif /* _ASM_INST_H */ diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h new file mode 100644 index 0000000000..c486c2341b --- /dev/null +++ b/arch/loongarch/include/asm/io.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#include <linux/kernel.h> +#include <linux/types.h> + +#include <asm/addrspace.h> +#include <asm/cpu.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/string.h> + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size); +extern void __init early_iounmap(void __iomem *addr, unsigned long size); + +#define early_memremap early_ioremap +#define early_memunmap early_iounmap + +#ifdef CONFIG_ARCH_IOREMAP + +static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, + unsigned long prot_val) +{ + if (prot_val & _CACHE_CC) + return (void __iomem *)(unsigned long)(CACHE_BASE + offset); + else + return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset); +} + +#define ioremap(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) + +#define iounmap(addr) ((void)(addr)) + +#endif + +/* + * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache(). + * They map bus memory into CPU space, the mapped memory is marked uncachable + * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and + * cachable (_CACHE_CC) respectively for CPU access. 
+ * + * @offset: bus address of the memory + * @size: size of the resource to map + */ +#define ioremap_wc(offset, size) \ + ioremap_prot((offset), (size), \ + pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)) + +#define ioremap_cache(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) + +#define mmiowb() wmb() + +/* + * String version of I/O memory access operations. + */ +extern void __memset_io(volatile void __iomem *dst, int c, size_t count); +extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count); +extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count); +#define memset_io(c, v, l) __memset_io((c), (v), (l)) +#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l)) +#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l)) + +#include <asm-generic/io.h> + +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + +#endif /* _ASM_IO_H */ diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h new file mode 100644 index 0000000000..218b4da0ea --- /dev/null +++ b/arch/loongarch/include/asm/irq.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IRQ_H +#define _ASM_IRQ_H + +#include <linux/irqdomain.h> +#include <linux/irqreturn.h> + +#define IRQ_STACK_SIZE THREAD_SIZE +#define IRQ_STACK_START (IRQ_STACK_SIZE - 16) + +DECLARE_PER_CPU(unsigned long, irq_stack); + +/* + * The highest address on the IRQ stack contains a dummy frame which is + * structured as follows: + * + * top ------------ + * | task sp | <- irq_stack[cpu] + IRQ_STACK_START + * ------------ + * | | <- First frame of IRQ context + * ------------ + * + * task sp holds a copy of the task stack pointer where the struct pt_regs + * from exception entry can be found. 
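+ * + * on_irq_stack() below uses this layout to decide whether a given stack pointer lies on a CPU's IRQ stack, and the saved task sp is what allows a stack walk to continue on the task stack.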
+ */ + +static inline bool on_irq_stack(int cpu, unsigned long sp) +{ + unsigned long low = per_cpu(irq_stack, cpu); + unsigned long high = low + IRQ_STACK_SIZE; + + return (low <= sp && sp <= high); +} + +void spurious_interrupt(void); + +#define NR_IRQS_LEGACY 16 + +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); + +#define MAX_IO_PICS 2 +#define NR_IRQS (64 + (256 * MAX_IO_PICS)) + +struct acpi_vector_group { + int node; + int pci_segment; + struct irq_domain *parent; +}; +extern struct acpi_vector_group pch_group[MAX_IO_PICS]; +extern struct acpi_vector_group msi_group[MAX_IO_PICS]; + +#define CORES_PER_EIO_NODE 4 + +#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ +#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */ +#define LOONGSON_CPU_HT0_VEC 16 /* CPU HT0 irq vector base number */ +#define LOONGSON_CPU_HT1_VEC 24 /* CPU HT1 irq vector base number */ + +/* IRQ number definitions */ +#define LOONGSON_LPC_IRQ_BASE 0 +#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) + +#define LOONGSON_CPU_IRQ_BASE 16 +#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) + +#define LOONGSON_PCH_IRQ_BASE 64 +#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) +#define LOONGSON_PCH_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 64 - 1) + +#define LOONGSON_MSI_IRQ_BASE (LOONGSON_PCH_IRQ_BASE + 64) +#define LOONGSON_MSI_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1) + +#define GSI_MIN_LPC_IRQ LOONGSON_LPC_IRQ_BASE +#define GSI_MAX_LPC_IRQ (LOONGSON_LPC_IRQ_BASE + 16 - 1) +#define GSI_MIN_CPU_IRQ LOONGSON_CPU_IRQ_BASE +#define GSI_MAX_CPU_IRQ (LOONGSON_CPU_IRQ_BASE + 48 - 1) +#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE +#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1) + +struct acpi_madt_lio_pic; +struct acpi_madt_eio_pic; +struct acpi_madt_ht_pic; +struct acpi_madt_bio_pic; +struct acpi_madt_msi_pic; +struct acpi_madt_lpc_pic; + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); + +int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int find_pch_pic(u32 gsi); +struct fwnode_handle *get_pch_msi_handle(int pci_segment); + +extern struct acpi_madt_lio_pic *acpi_liointc; +extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +extern struct acpi_madt_ht_pic *acpi_htintc; +extern struct acpi_madt_lpc_pic *acpi_pchlpc; +extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +extern struct fwnode_handle *cpuintc_handle; +extern struct fwnode_handle *liointc_handle; +extern struct fwnode_handle *pch_lpc_handle; +extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; + +extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); + +#include <asm-generic/irq.h> + +#endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/irq_regs.h b/arch/loongarch/include/asm/irq_regs.h new file mode 100644 index 0000000000..3d62d815bf --- /dev/null +++ b/arch/loongarch/include/asm/irq_regs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson 
Technology Corporation Limited + */ +#ifndef __ASM_IRQ_REGS_H +#define __ASM_IRQ_REGS_H + +#define ARCH_HAS_OWN_IRQ_REGS + +#include <linux/thread_info.h> + +static inline struct pt_regs *get_irq_regs(void) +{ + return current_thread_info()->regs; +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = get_irq_regs(); + current_thread_info()->regs = new_regs; + + return old_regs; +} + +#endif /* __ASM_IRQ_REGS_H */ diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h new file mode 100644 index 0000000000..319a8c616f --- /dev/null +++ b/arch/loongarch/include/asm/irqflags.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/stringify.h> +#include <asm/loongarch.h> + +static inline void arch_local_irq_enable(void) +{ + u32 flags = CSR_CRMD_IE; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline void arch_local_irq_disable(void) +{ + u32 flags = 0; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + u32 flags = 0; + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + __asm__ __volatile__( + "csrxchg %[val], %[mask], %[reg]\n\t" + : [val] "+r" (flags) + : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); +} + +static inline unsigned long arch_local_save_flags(void) +{ + u32 flags; + __asm__ __volatile__( + "csrrd %[val], %[reg]\n\t" + : [val] "=r" (flags) + : [reg] "i" (LOONGARCH_CSR_CRMD) + : "memory"); + return flags; +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & CSR_CRMD_IE); +} + +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* #ifndef __ASSEMBLY__ */ + +#endif /* _ASM_IRQFLAGS_H */ diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h new file mode 100644 index 0000000000..3cea299a5e --- /dev/null +++ b/arch/loongarch/include/asm/jump_label.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 Loongson Technology Corporation Limited + * + * Based on arch/arm64/include/asm/jump_label.h + */ +#ifndef __ASM_JUMP_LABEL_H +#define __ASM_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> + +#define JUMP_LABEL_NOP_SIZE 4 + +#define JUMP_TABLE_ENTRY \ + ".pushsection __jump_table, \"aw\" \n\t" \ + ".align 3 \n\t" \ + ".long 1b - ., %l[l_yes] - . \n\t" \ + ".quad %0 - . 
\n\t" \ + ".popsection \n\t" + +static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) +{ + asm_volatile_goto( + "1: nop \n\t" + JUMP_TABLE_ENTRY + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; + +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) +{ + asm_volatile_goto( + "1: b %l[l_yes] \n\t" + JUMP_TABLE_ENTRY + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; + +l_yes: + return true; +} + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h new file mode 100644 index 0000000000..cd6084f4e1 --- /dev/null +++ b/arch/loongarch/include/asm/kasan.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#include <linux/linkage.h> +#include <linux/mmzone.h> +#include <asm/addrspace.h> +#include <asm/io.h> +#include <asm/pgtable.h> + +#define KASAN_SHADOW_SCALE_SHIFT 3 +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + +#define XRANGE_SHIFT (48) + +/* Valid address length */ +#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) +/* Used for taking out the valid address */ +#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0) +/* One segment whole address space size */ +#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1) + +/* 64-bit segment value. */ +#define XKPRANGE_UC_SEG (0x8000) +#define XKPRANGE_CC_SEG (0x9000) +#define XKVRANGE_VC_SEG (0xffff) + +/* Cached */ +#define XKPRANGE_CC_START CACHE_BASE +#define XKPRANGE_CC_SIZE XRANGE_SIZE +#define XKPRANGE_CC_KASAN_OFFSET (0) +#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) +#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE) + +/* UnCached */ +#define XKPRANGE_UC_START UNCACHE_BASE +#define XKPRANGE_UC_SIZE XRANGE_SIZE +#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END +#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) +#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE) + +/* VMALLOC (Cached or UnCached) */ +#define XKVRANGE_VC_START MODULES_VADDR +#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE) +#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END +#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) +#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE) + +/* KAsan shadow memory start right after vmalloc. 
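+ * The three shadow sub-ranges are packed back to back: XKPRANGE_CC at offset 0, then XKPRANGE_UC, then XKVRANGE_VC, which is why each *_KASAN_OFFSET above equals the previous range's *_SHADOW_END.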
*/ +#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE) +#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET) +#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) + +#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET) +#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET) +#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET) + +extern bool kasan_early_stage; +extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; + +#define kasan_mem_to_shadow kasan_mem_to_shadow +void *kasan_mem_to_shadow(const void *addr); + +#define kasan_shadow_to_mem kasan_shadow_to_mem +const void *kasan_shadow_to_mem(const void *shadow_addr); + +#define kasan_arch_is_ready kasan_arch_is_ready +static __always_inline bool kasan_arch_is_ready(void) +{ + return !kasan_early_stage; +} + +#define addr_has_metadata addr_has_metadata +static __always_inline bool addr_has_metadata(const void *addr) +{ + return (kasan_mem_to_shadow((void *)addr) != NULL); +} + +void kasan_init(void); +asmlinkage void kasan_early_init(void); + +#endif +#endif diff --git a/arch/loongarch/include/asm/kdebug.h b/arch/loongarch/include/asm/kdebug.h new file mode 100644 index 0000000000..c00ed874bf --- /dev/null +++ b/arch/loongarch/include/asm/kdebug.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_KDEBUG_H +#define _ASM_LOONGARCH_KDEBUG_H + +#include <linux/notifier.h> + +enum die_val { + DIE_OOPS = 1, + DIE_RI, + DIE_FP, + DIE_SIMD, + DIE_TRAP, +}; + +#endif /* _ASM_LOONGARCH_KDEBUG_H */ diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h new file mode 100644 index 0000000000..cf95cd3eb2 --- /dev/null +++ b/arch/loongarch/include/asm/kexec.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kexec.h for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_KEXEC_H +#define _ASM_KEXEC_H + +#include <asm/stacktrace.h> +#include <asm/page.h> + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + /* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(*newregs)); + else + prepare_frametrace(newregs); +} + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long efi_boot; + unsigned long cmdline_ptr; + unsigned long systable_ptr; +}; + +typedef void (*do_kexec_t)(unsigned long efi_boot, + unsigned long cmdline_ptr, + unsigned long systable_ptr, + unsigned long start_addr, + unsigned long first_ind_entry); + +struct kimage; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; +extern void kexec_reboot(void); + +#ifdef CONFIG_SMP +extern atomic_t kexec_ready_to_reboot; +extern const unsigned char kexec_smp_wait[]; +#endif + +#endif /* !_ASM_KEXEC_H */ diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h 
new file mode 100644 index 0000000000..6c82aea1c9 --- /dev/null +++ b/arch/loongarch/include/asm/kfence.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KFENCE support for LoongArch. + * + * Author: Enze Li <lienze@kylinos.cn> + * Copyright (C) 2022-2023 KylinSoft Corporation. + */ + +#ifndef _ASM_LOONGARCH_KFENCE_H +#define _ASM_LOONGARCH_KFENCE_H + +#include <linux/kfence.h> +#include <asm/pgtable.h> +#include <asm/tlb.h> + +static inline bool arch_kfence_init_pool(void) +{ + int err; + char *kfence_pool = __kfence_pool; + struct vm_struct *area; + + area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP, + KFENCE_AREA_START, KFENCE_AREA_END, + __builtin_return_address(0)); + if (!area) + return false; + + __kfence_pool = (char *)area->addr; + err = ioremap_page_range((unsigned long)__kfence_pool, + (unsigned long)__kfence_pool + KFENCE_POOL_SIZE, + virt_to_phys((void *)kfence_pool), PAGE_KERNEL); + if (err) { + free_vm_area(area); + __kfence_pool = kfence_pool; + return false; + } + + return true; +} + +/* Protect the given page and flush TLB. */ +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + pte_t *pte = virt_to_kpte(addr); + + if (WARN_ON(!pte) || pte_none(*pte)) + return false; + + if (protect) + set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT))); + else + set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT))); + + preempt_disable(); + local_flush_tlb_one(addr); + preempt_enable(); + + return true; +} + +#endif /* _ASM_LOONGARCH_KFENCE_H */ diff --git a/arch/loongarch/include/asm/kgdb.h b/arch/loongarch/include/asm/kgdb.h new file mode 100644 index 0000000000..2041ae58b1 --- /dev/null +++ b/arch/loongarch/include/asm/kgdb.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_KGDB_H +#define _ASM_LOONGARCH_KGDB_H + +#define GDB_SIZEOF_REG sizeof(u64) + +/* gdb remote procotol expects the following register layout. */ + +/* + * General purpose registers: + * r0-r31: 64 bit + * orig_a0: 64 bit + * pc : 64 bit + * csr_badvaddr: 64 bit + */ +#define DBG_PT_REGS_BASE 0 +#define DBG_PT_REGS_NUM 35 +#define DBG_PT_REGS_END (DBG_PT_REGS_BASE + DBG_PT_REGS_NUM - 1) + +/* + * Floating point registers: + * f0-f31: 64 bit + */ +#define DBG_FPR_BASE (DBG_PT_REGS_END + 1) +#define DBG_FPR_NUM 32 +#define DBG_FPR_END (DBG_FPR_BASE + DBG_FPR_NUM - 1) + +/* + * Condition Flag registers: + * fcc0-fcc8: 8 bit + */ +#define DBG_FCC_BASE (DBG_FPR_END + 1) +#define DBG_FCC_NUM 8 +#define DBG_FCC_END (DBG_FCC_BASE + DBG_FCC_NUM - 1) + +/* + * Floating-point Control and Status registers: + * fcsr: 32 bit + */ +#define DBG_FCSR_NUM 1 +#define DBG_FCSR (DBG_FCC_END + 1) + +#define DBG_MAX_REG_NUM (DBG_FCSR + 1) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ +#define BUFMAX 2048 + +/* + * Number of bytes required for gdb_regs buffer. + * PT_REGS and FPR: 8 bytes; FCSR: 4 bytes; FCC: 1 bytes. + * GDB fails to connect for size beyond this with error + * "'g' packet reply is too long" + */ +#define NUMREGBYTES ((DBG_PT_REGS_NUM + DBG_FPR_NUM) * GDB_SIZEOF_REG + DBG_FCC_NUM * 1 + DBG_FCSR_NUM * 4) + +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +/* Register numbers of various important registers. 
*/ +enum dbg_loongarch_regnum { + DBG_LOONGARCH_ZERO = 0, + DBG_LOONGARCH_RA, + DBG_LOONGARCH_TP, + DBG_LOONGARCH_SP, + DBG_LOONGARCH_A0, + DBG_LOONGARCH_FP = 22, + DBG_LOONGARCH_S0, + DBG_LOONGARCH_S1, + DBG_LOONGARCH_S2, + DBG_LOONGARCH_S3, + DBG_LOONGARCH_S4, + DBG_LOONGARCH_S5, + DBG_LOONGARCH_S6, + DBG_LOONGARCH_S7, + DBG_LOONGARCH_S8, + DBG_LOONGARCH_ORIG_A0, + DBG_LOONGARCH_PC, + DBG_LOONGARCH_BADV +}; + +void kgdb_breakinst(void); +void arch_kgdb_breakpoint(void); + +#ifdef CONFIG_KGDB +bool kgdb_breakpoint_handler(struct pt_regs *regs); +#else /* !CONFIG_KGDB */ +static inline bool kgdb_breakpoint_handler(struct pt_regs *regs) { return false; } +#endif /* CONFIG_KGDB */ + +#endif /* __ASM_KGDB_H_ */ diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h new file mode 100644 index 0000000000..60fa753a01 --- /dev/null +++ b/arch/loongarch/include/asm/kprobes.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_LOONGARCH_KPROBES_H +#define __ASM_LOONGARCH_KPROBES_H + +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES + +#include <asm/inst.h> +#include <asm/cacheflush.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) \ +do { \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ + (unsigned long)p->addr + \ + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ +} while (0) + +#define kretprobe_blacklist_size 0 + +typedef u32 kprobe_opcode_t; + +/* Architecture specific copy of original instruction */ +struct arch_specific_insn { + /* copy of the original instruction */ + kprobe_opcode_t *insn; + /* restore address after simulation */ + unsigned long restore; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_status; + struct prev_kprobe prev_kprobe; +}; + +void arch_remove_kprobe(struct kprobe *p); +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr); +bool kprobe_breakpoint_handler(struct pt_regs *regs); +bool kprobe_singlestep_handler(struct pt_regs *regs); + +#else /* !CONFIG_KPROBES */ + +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return false; } +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return false; } + +#endif /* CONFIG_KPROBES */ +#endif /* __ASM_LOONGARCH_KPROBES_H */ diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h new file mode 100644 index 0000000000..e671978bf5 --- /dev/null +++ b/arch/loongarch/include/asm/lbt.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Qi Hu <huqi@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LBT_H +#define _ASM_LBT_H + +#include <asm/cpu.h> +#include <asm/current.h> +#include <asm/loongarch.h> +#include <asm/processor.h> + +extern void _init_lbt(void); +extern void _save_lbt(struct loongarch_lbt *); +extern void _restore_lbt(struct loongarch_lbt *); + +static inline int is_lbt_enabled(void) +{ + if (!cpu_has_lbt) + return 0; + + return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LBTEN) ? 
+ 1 : 0; +} + +static inline int is_lbt_owner(void) +{ + return test_thread_flag(TIF_USEDLBT); +} + +#ifdef CONFIG_CPU_HAS_LBT + +static inline void enable_lbt(void) +{ + if (cpu_has_lbt) + csr_xchg32(CSR_EUEN_LBTEN, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN); +} + +static inline void disable_lbt(void) +{ + if (cpu_has_lbt) + csr_xchg32(0, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN); +} + +static inline void __own_lbt(void) +{ + enable_lbt(); + set_thread_flag(TIF_USEDLBT); + KSTK_EUEN(current) |= CSR_EUEN_LBTEN; +} + +static inline void own_lbt_inatomic(int restore) +{ + if (cpu_has_lbt && !is_lbt_owner()) { + __own_lbt(); + if (restore) + _restore_lbt(&current->thread.lbt); + } +} + +static inline void own_lbt(int restore) +{ + preempt_disable(); + own_lbt_inatomic(restore); + preempt_enable(); +} + +static inline void lose_lbt_inatomic(int save, struct task_struct *tsk) +{ + if (cpu_has_lbt && is_lbt_owner()) { + if (save) + _save_lbt(&tsk->thread.lbt); + + disable_lbt(); + clear_tsk_thread_flag(tsk, TIF_USEDLBT); + } + KSTK_EUEN(tsk) &= ~(CSR_EUEN_LBTEN); +} + +static inline void lose_lbt(int save) +{ + preempt_disable(); + lose_lbt_inatomic(save, current); + preempt_enable(); +} + +static inline void init_lbt(void) +{ + __own_lbt(); + _init_lbt(); +} +#else +static inline void own_lbt_inatomic(int restore) {} +static inline void lose_lbt_inatomic(int save, struct task_struct *tsk) {} +static inline void init_lbt(void) {} +static inline void lose_lbt(int save) {} +#endif + +static inline int thread_lbt_context_live(void) +{ + if (!cpu_has_lbt) + return 0; + + return test_thread_flag(TIF_LBT_CTX_LIVE); +} + +#endif /* _ASM_LBT_H */ diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h new file mode 100644 index 0000000000..e2eca1a25b --- /dev/null +++ b/arch/loongarch/include/asm/linkage.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 2 +#define __ALIGN_STR __stringify(__ALIGN) + +#define SYM_FUNC_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_NOALIGN(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_LOCAL_NOALIGN(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_START_WEAK(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_FUNC_START_WEAK_NOALIGN(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \ + .cfi_startproc; + +#define SYM_FUNC_END(name) \ + .cfi_endproc; \ + SYM_END(name, SYM_T_FUNC) + +#define SYM_CODE_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ + .cfi_startproc; + +#define SYM_CODE_END(name) \ + .cfi_endproc; \ + SYM_END(name, SYM_T_NONE) + +#endif diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h new file mode 100644 index 0000000000..c49675852b --- /dev/null +++ b/arch/loongarch/include/asm/local.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ARCH_LOONGARCH_LOCAL_H +#define _ARCH_LOONGARCH_LOCAL_H + +#include <linux/percpu.h> +#include <linux/bitops.h> +#include <linux/atomic.h> +#include <asm/cmpxchg.h> + +typedef struct { + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) 
atomic_long_read(&(l)->a) +#define local_set(l, i) atomic_long_set(&(l)->a, (i)) + +#define local_add(i, l) atomic_long_add((i), (&(l)->a)) +#define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) + +/* + * Same as above, but return the result value + */ +static inline long local_add_return(long i, local_t *l) +{ + unsigned long result; + + __asm__ __volatile__( + " " __AMADD " %1, %2, %0 \n" + : "+ZB" (l->a.counter), "=&r" (result) + : "r" (i) + : "memory"); + result = result + i; + + return result; +} + +static inline long local_sub_return(long i, local_t *l) +{ + unsigned long result; + + __asm__ __volatile__( + " " __AMADD "%1, %2, %0 \n" + : "+ZB" (l->a.counter), "=&r" (result) + : "r" (-i) + : "memory"); + + result = result - i; + + return result; +} + +static inline long local_cmpxchg(local_t *l, long old, long new) +{ + return cmpxchg_local(&l->a.counter, old, new); +} + +static inline bool local_try_cmpxchg(local_t *l, long *old, long new) +{ + return try_cmpxchg_local(&l->a.counter, + (typeof(l->a.counter) *) old, new); +} + +#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. + * Returns non-zero if @l was not @u, and zero otherwise. + */ +#define local_add_unless(l, a, u) \ +({ \ + long c, old; \ + c = local_read(l); \ + while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +#define local_dec_return(l) local_sub_return(1, (l)) +#define local_inc_return(l) local_add_return(1, (l)) + +/* + * local_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @l: pointer of type local_t + * + * Atomically subtracts @i from @l and returns + * true if the result is zero, or false for all + * other cases. + */ +#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) + +/* + * local_inc_and_test - increment and test + * @l: pointer of type local_t + * + * Atomically increments @l by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define local_inc_and_test(l) (local_inc_return(l) == 0) + +/* + * local_dec_and_test - decrement by 1 and test + * @l: pointer of type local_t + * + * Atomically decrements @l by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) + +/* + * local_add_negative - add and test if negative + * @l: pointer of type local_t + * @i: integer value to add + * + * Atomically adds @i to @l and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#define local_add_negative(i, l) (local_add_return(i, (l)) < 0) + +/* Use these for per-cpu local_t variables: on some archs they are + * much more efficient than these naive implementations. Note they take + * a variable, not an address. 
+ */ + +#define __local_inc(l) ((l)->a.counter++) +#define __local_dec(l) ((l)->a.counter--) +#define __local_add(i, l) ((l)->a.counter += (i)) +#define __local_sub(i, l) ((l)->a.counter -= (i)) + +#endif /* _ARCH_LOONGARCH_LOCAL_H */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h new file mode 100644 index 0000000000..33531d432b --- /dev/null +++ b/arch/loongarch/include/asm/loongarch.h @@ -0,0 +1,1434 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_H +#define _ASM_LOONGARCH_H + +#include <linux/bits.h> +#include <linux/linkage.h> +#include <linux/types.h> + +#ifndef __ASSEMBLY__ +#include <larchintrin.h> + +/* CPUCFG */ +#define read_cpucfg(reg) __cpucfg(reg) + +#endif /* !__ASSEMBLY__ */ + +#ifdef __ASSEMBLY__ + +/* LoongArch Registers */ +#define REG_ZERO 0x0 +#define REG_RA 0x1 +#define REG_TP 0x2 +#define REG_SP 0x3 +#define REG_A0 0x4 /* Reused as V0 for return value */ +#define REG_A1 0x5 /* Reused as V1 for return value */ +#define REG_A2 0x6 +#define REG_A3 0x7 +#define REG_A4 0x8 +#define REG_A5 0x9 +#define REG_A6 0xa +#define REG_A7 0xb +#define REG_T0 0xc +#define REG_T1 0xd +#define REG_T2 0xe +#define REG_T3 0xf +#define REG_T4 0x10 +#define REG_T5 0x11 +#define REG_T6 0x12 +#define REG_T7 0x13 +#define REG_T8 0x14 +#define REG_U0 0x15 /* Kernel uses it as percpu base */ +#define REG_FP 0x16 +#define REG_S0 0x17 +#define REG_S1 0x18 +#define REG_S2 0x19 +#define REG_S3 0x1a +#define REG_S4 0x1b +#define REG_S5 0x1c +#define REG_S6 0x1d +#define REG_S7 0x1e +#define REG_S8 0x1f + +#endif /* __ASSEMBLY__ */ + +/* Bit fields for CPUCFG registers */ +#define LOONGARCH_CPUCFG0 0x0 +#define CPUCFG0_PRID GENMASK(31, 0) + +#define LOONGARCH_CPUCFG1 0x1 +#define CPUCFG1_ISGR32 BIT(0) +#define CPUCFG1_ISGR64 BIT(1) +#define CPUCFG1_PAGING BIT(2) +#define CPUCFG1_IOCSR BIT(3) +#define CPUCFG1_PABITS GENMASK(11, 4) +#define CPUCFG1_VABITS GENMASK(19, 12) +#define CPUCFG1_UAL BIT(20) +#define CPUCFG1_RI BIT(21) +#define CPUCFG1_EP BIT(22) +#define CPUCFG1_RPLV BIT(23) +#define CPUCFG1_HUGEPG BIT(24) +#define CPUCFG1_CRC32 BIT(25) +#define CPUCFG1_MSGINT BIT(26) + +#define LOONGARCH_CPUCFG2 0x2 +#define CPUCFG2_FP BIT(0) +#define CPUCFG2_FPSP BIT(1) +#define CPUCFG2_FPDP BIT(2) +#define CPUCFG2_FPVERS GENMASK(5, 3) +#define CPUCFG2_LSX BIT(6) +#define CPUCFG2_LASX BIT(7) +#define CPUCFG2_COMPLEX BIT(8) +#define CPUCFG2_CRYPTO BIT(9) +#define CPUCFG2_LVZP BIT(10) +#define CPUCFG2_LVZVER GENMASK(13, 11) +#define CPUCFG2_LLFTP BIT(14) +#define CPUCFG2_LLFTPREV GENMASK(17, 15) +#define CPUCFG2_X86BT BIT(18) +#define CPUCFG2_ARMBT BIT(19) +#define CPUCFG2_MIPSBT BIT(20) +#define CPUCFG2_LSPW BIT(21) +#define CPUCFG2_LAM BIT(22) +#define CPUCFG2_PTW BIT(24) + +#define LOONGARCH_CPUCFG3 0x3 +#define CPUCFG3_CCDMA BIT(0) +#define CPUCFG3_SFB BIT(1) +#define CPUCFG3_UCACC BIT(2) +#define CPUCFG3_LLEXC BIT(3) +#define CPUCFG3_SCDLY BIT(4) +#define CPUCFG3_LLDBAR BIT(5) +#define CPUCFG3_ITLBT BIT(6) +#define CPUCFG3_ICACHET BIT(7) +#define CPUCFG3_SPW_LVL GENMASK(10, 8) +#define CPUCFG3_SPW_HG_HF BIT(11) +#define CPUCFG3_RVA BIT(12) +#define CPUCFG3_RVAMAX GENMASK(16, 13) + +#define LOONGARCH_CPUCFG4 0x4 +#define CPUCFG4_CCFREQ GENMASK(31, 0) + +#define LOONGARCH_CPUCFG5 0x5 +#define CPUCFG5_CCMUL GENMASK(15, 0) +#define CPUCFG5_CCDIV GENMASK(31, 16) + +#define LOONGARCH_CPUCFG6 0x6 +#define CPUCFG6_PMP BIT(0) +#define CPUCFG6_PAMVER GENMASK(3, 1) +#define 
CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMBITS GENMASK(13, 8) +#define CPUCFG6_UPM BIT(14) + +#define LOONGARCH_CPUCFG16 0x10 +#define CPUCFG16_L1_IUPRE BIT(0) +#define CPUCFG16_L1_IUUNIFY BIT(1) +#define CPUCFG16_L1_DPRE BIT(2) +#define CPUCFG16_L2_IUPRE BIT(3) +#define CPUCFG16_L2_IUUNIFY BIT(4) +#define CPUCFG16_L2_IUPRIV BIT(5) +#define CPUCFG16_L2_IUINCL BIT(6) +#define CPUCFG16_L2_DPRE BIT(7) +#define CPUCFG16_L2_DPRIV BIT(8) +#define CPUCFG16_L2_DINCL BIT(9) +#define CPUCFG16_L3_IUPRE BIT(10) +#define CPUCFG16_L3_IUUNIFY BIT(11) +#define CPUCFG16_L3_IUPRIV BIT(12) +#define CPUCFG16_L3_IUINCL BIT(13) +#define CPUCFG16_L3_DPRE BIT(14) +#define CPUCFG16_L3_DPRIV BIT(15) +#define CPUCFG16_L3_DINCL BIT(16) + +#define LOONGARCH_CPUCFG17 0x11 +#define LOONGARCH_CPUCFG18 0x12 +#define LOONGARCH_CPUCFG19 0x13 +#define LOONGARCH_CPUCFG20 0x14 +#define CPUCFG_CACHE_WAYS_M GENMASK(15, 0) +#define CPUCFG_CACHE_SETS_M GENMASK(23, 16) +#define CPUCFG_CACHE_LSIZE_M GENMASK(30, 24) +#define CPUCFG_CACHE_WAYS 0 +#define CPUCFG_CACHE_SETS 16 +#define CPUCFG_CACHE_LSIZE 24 + +#define LOONGARCH_CPUCFG48 0x30 +#define CPUCFG48_MCSR_LCK BIT(0) +#define CPUCFG48_NAP_EN BIT(1) +#define CPUCFG48_VFPU_CG BIT(2) +#define CPUCFG48_RAM_CG BIT(3) + +#ifndef __ASSEMBLY__ + +/* CSR */ +#define csr_read32(reg) __csrrd_w(reg) +#define csr_read64(reg) __csrrd_d(reg) +#define csr_write32(val, reg) __csrwr_w(val, reg) +#define csr_write64(val, reg) __csrwr_d(val, reg) +#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg) +#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg) + +/* IOCSR */ +#define iocsr_read32(reg) __iocsrrd_w(reg) +#define iocsr_read64(reg) __iocsrrd_d(reg) +#define iocsr_write32(val, reg) __iocsrwr_w(val, reg) +#define iocsr_write64(val, reg) __iocsrwr_d(val, reg) + +#endif /* !__ASSEMBLY__ */ + +/* CSR register number */ + +/* Basic CSR registers */ +#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */ +#define CSR_CRMD_WE_SHIFT 9 +#define CSR_CRMD_WE (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT) +#define CSR_CRMD_DACM_SHIFT 7 +#define CSR_CRMD_DACM_WIDTH 2 +#define CSR_CRMD_DACM (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT) +#define CSR_CRMD_DACF_SHIFT 5 +#define CSR_CRMD_DACF_WIDTH 2 +#define CSR_CRMD_DACF (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT) +#define CSR_CRMD_PG_SHIFT 4 +#define CSR_CRMD_PG (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT) +#define CSR_CRMD_DA_SHIFT 3 +#define CSR_CRMD_DA (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT) +#define CSR_CRMD_IE_SHIFT 2 +#define CSR_CRMD_IE (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT) +#define CSR_CRMD_PLV_SHIFT 0 +#define CSR_CRMD_PLV_WIDTH 2 +#define CSR_CRMD_PLV (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT) + +#define PLV_KERN 0 +#define PLV_USER 3 +#define PLV_MASK 0x3 + +#define LOONGARCH_CSR_PRMD 0x1 /* Prev-exception mode info */ +#define CSR_PRMD_PWE_SHIFT 3 +#define CSR_PRMD_PWE (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT) +#define CSR_PRMD_PIE_SHIFT 2 +#define CSR_PRMD_PIE (_ULCAST_(0x1) << CSR_PRMD_PIE_SHIFT) +#define CSR_PRMD_PPLV_SHIFT 0 +#define CSR_PRMD_PPLV_WIDTH 2 +#define CSR_PRMD_PPLV (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT) + +#define LOONGARCH_CSR_EUEN 0x2 /* Extended unit enable */ +#define CSR_EUEN_LBTEN_SHIFT 3 +#define CSR_EUEN_LBTEN (_ULCAST_(0x1) << CSR_EUEN_LBTEN_SHIFT) +#define CSR_EUEN_LASXEN_SHIFT 2 +#define CSR_EUEN_LASXEN (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT) +#define CSR_EUEN_LSXEN_SHIFT 1 +#define CSR_EUEN_LSXEN (_ULCAST_(0x1) << CSR_EUEN_LSXEN_SHIFT) +#define CSR_EUEN_FPEN_SHIFT 0 +#define CSR_EUEN_FPEN (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT) + 
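As an illustrative sketch (not part of this patch), the csr_read32()/csr_write32()/csr_xchg32() accessors above are typically combined with the CSR bit-field masks to perform read-modify-write updates of individual control bits. The hypothetical helpers below (the function names are invented for illustration; everything else they use is defined in this header) show the intended usage pattern, assuming a kernel context that includes this header:

/* Hypothetical usage sketch, not from the patch. */
static inline void example_enable_fpu(void)
{
	/* Set EUEN.FPE under mask, leaving the other enable bits untouched. */
	csr_xchg32(CSR_EUEN_FPEN, CSR_EUEN_FPEN, LOONGARCH_CSR_EUEN);
}

static inline unsigned int example_current_plv(void)
{
	/* CRMD.PLV occupies bits [1:0]: 0 is PLV_KERN, 3 is PLV_USER. */
	return csr_read32(LOONGARCH_CSR_CRMD) & CSR_CRMD_PLV;
}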
+#define LOONGARCH_CSR_MISC 0x3 /* Misc config */ + +#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ +#define CSR_ECFG_VS_SHIFT 16 +#define CSR_ECFG_VS_WIDTH 3 +#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT) +#define CSR_ECFG_IM_SHIFT 0 +#define CSR_ECFG_IM_WIDTH 14 +#define CSR_ECFG_IM (_ULCAST_(0x3fff) << CSR_ECFG_IM_SHIFT) + +#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */ +#define CSR_ESTAT_ESUBCODE_SHIFT 22 +#define CSR_ESTAT_ESUBCODE_WIDTH 9 +#define CSR_ESTAT_ESUBCODE (_ULCAST_(0x1ff) << CSR_ESTAT_ESUBCODE_SHIFT) +#define CSR_ESTAT_EXC_SHIFT 16 +#define CSR_ESTAT_EXC_WIDTH 6 +#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) +#define CSR_ESTAT_IS_SHIFT 0 +#define CSR_ESTAT_IS_WIDTH 14 +#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) + +#define LOONGARCH_CSR_ERA 0x6 /* ERA */ + +#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */ + +#define LOONGARCH_CSR_BADI 0x8 /* Bad instruction */ + +#define LOONGARCH_CSR_EENTRY 0xc /* Exception entry */ + +/* TLB related CSR registers */ +#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize, NP */ +#define CSR_TLBIDX_EHINV_SHIFT 31 +#define CSR_TLBIDX_EHINV (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT) +#define CSR_TLBIDX_PS_SHIFT 24 +#define CSR_TLBIDX_PS_WIDTH 6 +#define CSR_TLBIDX_PS (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT) +#define CSR_TLBIDX_IDX_SHIFT 0 +#define CSR_TLBIDX_IDX_WIDTH 12 +#define CSR_TLBIDX_IDX (_ULCAST_(0xfff) << CSR_TLBIDX_IDX_SHIFT) +#define CSR_TLBIDX_SIZEM 0x3f000000 +#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT +#define CSR_TLBIDX_IDXM 0xfff +#define CSR_INVALID_ENTRY(e) (CSR_TLBIDX_EHINV | e) + +#define LOONGARCH_CSR_TLBEHI 0x11 /* TLB EntryHi */ + +#define LOONGARCH_CSR_TLBELO0 0x12 /* TLB EntryLo0 */ +#define CSR_TLBLO0_RPLV_SHIFT 63 +#define CSR_TLBLO0_RPLV (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT) +#define CSR_TLBLO0_NX_SHIFT 62 +#define CSR_TLBLO0_NX (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT) +#define CSR_TLBLO0_NR_SHIFT 61 +#define CSR_TLBLO0_NR (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT) +#define CSR_TLBLO0_PFN_SHIFT 12 +#define CSR_TLBLO0_PFN_WIDTH 36 +#define CSR_TLBLO0_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO0_PFN_SHIFT) +#define CSR_TLBLO0_GLOBAL_SHIFT 6 +#define CSR_TLBLO0_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO0_GLOBAL_SHIFT) +#define CSR_TLBLO0_CCA_SHIFT 4 +#define CSR_TLBLO0_CCA_WIDTH 2 +#define CSR_TLBLO0_CCA (_ULCAST_(0x3) << CSR_TLBLO0_CCA_SHIFT) +#define CSR_TLBLO0_PLV_SHIFT 2 +#define CSR_TLBLO0_PLV_WIDTH 2 +#define CSR_TLBLO0_PLV (_ULCAST_(0x3) << CSR_TLBLO0_PLV_SHIFT) +#define CSR_TLBLO0_WE_SHIFT 1 +#define CSR_TLBLO0_WE (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT) +#define CSR_TLBLO0_V_SHIFT 0 +#define CSR_TLBLO0_V (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT) + +#define LOONGARCH_CSR_TLBELO1 0x13 /* TLB EntryLo1 */ +#define CSR_TLBLO1_RPLV_SHIFT 63 +#define CSR_TLBLO1_RPLV (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT) +#define CSR_TLBLO1_NX_SHIFT 62 +#define CSR_TLBLO1_NX (_ULCAST_(0x1) << CSR_TLBLO1_NX_SHIFT) +#define CSR_TLBLO1_NR_SHIFT 61 +#define CSR_TLBLO1_NR (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT) +#define CSR_TLBLO1_PFN_SHIFT 12 +#define CSR_TLBLO1_PFN_WIDTH 36 +#define CSR_TLBLO1_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO1_PFN_SHIFT) +#define CSR_TLBLO1_GLOBAL_SHIFT 6 +#define CSR_TLBLO1_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO1_GLOBAL_SHIFT) +#define CSR_TLBLO1_CCA_SHIFT 4 +#define CSR_TLBLO1_CCA_WIDTH 2 +#define CSR_TLBLO1_CCA (_ULCAST_(0x3) << CSR_TLBLO1_CCA_SHIFT) +#define CSR_TLBLO1_PLV_SHIFT 2 +#define CSR_TLBLO1_PLV_WIDTH 2 +#define CSR_TLBLO1_PLV (_ULCAST_(0x3) << 
CSR_TLBLO1_PLV_SHIFT) +#define CSR_TLBLO1_WE_SHIFT 1 +#define CSR_TLBLO1_WE (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT) +#define CSR_TLBLO1_V_SHIFT 0 +#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT) + +#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */ +#define CSR_GTLBC_RID_SHIFT 16 +#define CSR_GTLBC_RID_WIDTH 8 +#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT) +#define CSR_GTLBC_TOTI_SHIFT 13 +#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT) +#define CSR_GTLBC_USERID_SHIFT 12 +#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT) +#define CSR_GTLBC_GMTLBSZ_SHIFT 0 +#define CSR_GTLBC_GMTLBSZ_WIDTH 6 +#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT) + +#define LOONGARCH_CSR_TRGP 0x16 /* TLBR read guest info */ +#define CSR_TRGP_RID_SHIFT 16 +#define CSR_TRGP_RID_WIDTH 8 +#define CSR_TRGP_RID (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT) +#define CSR_TRGP_GTLB_SHIFT 0 +#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT) + +#define LOONGARCH_CSR_ASID 0x18 /* ASID */ +#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */ +#define CSR_ASID_BIT_WIDTH 8 +#define CSR_ASID_BIT (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT) +#define CSR_ASID_ASID_SHIFT 0 +#define CSR_ASID_ASID_WIDTH 10 +#define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT) + +#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[VALEN-1] = 0 */ + +#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[VALEN-1] = 1 */ + +#define LOONGARCH_CSR_PGD 0x1b /* Page table base */ + +#define LOONGARCH_CSR_PWCTL0 0x1c /* PWCtl0 */ +#define CSR_PWCTL0_PTEW_SHIFT 30 +#define CSR_PWCTL0_PTEW_WIDTH 2 +#define CSR_PWCTL0_PTEW (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT) +#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25 +#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5 +#define CSR_PWCTL0_DIR1WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1WIDTH_SHIFT) +#define CSR_PWCTL0_DIR1BASE_SHIFT 20 +#define CSR_PWCTL0_DIR1BASE_WIDTH 5 +#define CSR_PWCTL0_DIR1BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1BASE_SHIFT) +#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15 +#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5 +#define CSR_PWCTL0_DIR0WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0WIDTH_SHIFT) +#define CSR_PWCTL0_DIR0BASE_SHIFT 10 +#define CSR_PWCTL0_DIR0BASE_WIDTH 5 +#define CSR_PWCTL0_DIR0BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0BASE_SHIFT) +#define CSR_PWCTL0_PTWIDTH_SHIFT 5 +#define CSR_PWCTL0_PTWIDTH_WIDTH 5 +#define CSR_PWCTL0_PTWIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_PTWIDTH_SHIFT) +#define CSR_PWCTL0_PTBASE_SHIFT 0 +#define CSR_PWCTL0_PTBASE_WIDTH 5 +#define CSR_PWCTL0_PTBASE (_ULCAST_(0x1f) << CSR_PWCTL0_PTBASE_SHIFT) + +#define LOONGARCH_CSR_PWCTL1 0x1d /* PWCtl1 */ +#define CSR_PWCTL1_PTW_SHIFT 24 +#define CSR_PWCTL1_PTW_WIDTH 1 +#define CSR_PWCTL1_PTW (_ULCAST_(0x1) << CSR_PWCTL1_PTW_SHIFT) +#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18 +#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5 +#define CSR_PWCTL1_DIR3WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3WIDTH_SHIFT) +#define CSR_PWCTL1_DIR3BASE_SHIFT 12 +#define CSR_PWCTL1_DIR3BASE_WIDTH 5 +#define CSR_PWCTL1_DIR3BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR3BASE_SHIFT) +#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6 +#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5 +#define CSR_PWCTL1_DIR2WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2WIDTH_SHIFT) +#define CSR_PWCTL1_DIR2BASE_SHIFT 0 +#define CSR_PWCTL1_DIR2BASE_WIDTH 5 +#define CSR_PWCTL1_DIR2BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR2BASE_SHIFT) + +#define LOONGARCH_CSR_STLBPGSIZE 0x1e +#define CSR_STLBPGSIZE_PS_WIDTH 6 +#define CSR_STLBPGSIZE_PS (_ULCAST_(0x3f)) + +#define LOONGARCH_CSR_RVACFG 0x1f 
+#define CSR_RVACFG_RDVA_WIDTH 4 +#define CSR_RVACFG_RDVA (_ULCAST_(0xf)) + +/* Config CSR registers */ +#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */ +#define CSR_CPUID_COREID_WIDTH 9 +#define CSR_CPUID_COREID _ULCAST_(0x1ff) + +#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */ +#define CSR_CONF1_VSMAX_SHIFT 12 +#define CSR_CONF1_VSMAX_WIDTH 3 +#define CSR_CONF1_VSMAX (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT) +#define CSR_CONF1_TMRBITS_SHIFT 4 +#define CSR_CONF1_TMRBITS_WIDTH 8 +#define CSR_CONF1_TMRBITS (_ULCAST_(0xff) << CSR_CONF1_TMRBITS_SHIFT) +#define CSR_CONF1_KSNUM_WIDTH 4 +#define CSR_CONF1_KSNUM _ULCAST_(0xf) + +#define LOONGARCH_CSR_PRCFG2 0x22 /* Config2 */ +#define CSR_CONF2_PGMASK_SUPP 0x3ffff000 + +#define LOONGARCH_CSR_PRCFG3 0x23 /* Config3 */ +#define CSR_CONF3_STLBIDX_SHIFT 20 +#define CSR_CONF3_STLBIDX_WIDTH 6 +#define CSR_CONF3_STLBIDX (_ULCAST_(0x3f) << CSR_CONF3_STLBIDX_SHIFT) +#define CSR_CONF3_STLBWAYS_SHIFT 12 +#define CSR_CONF3_STLBWAYS_WIDTH 8 +#define CSR_CONF3_STLBWAYS (_ULCAST_(0xff) << CSR_CONF3_STLBWAYS_SHIFT) +#define CSR_CONF3_MTLBSIZE_SHIFT 4 +#define CSR_CONF3_MTLBSIZE_WIDTH 8 +#define CSR_CONF3_MTLBSIZE (_ULCAST_(0xff) << CSR_CONF3_MTLBSIZE_SHIFT) +#define CSR_CONF3_TLBTYPE_SHIFT 0 +#define CSR_CONF3_TLBTYPE_WIDTH 4 +#define CSR_CONF3_TLBTYPE (_ULCAST_(0xf) << CSR_CONF3_TLBTYPE_SHIFT) + +/* KSave registers */ +#define LOONGARCH_CSR_KS0 0x30 +#define LOONGARCH_CSR_KS1 0x31 +#define LOONGARCH_CSR_KS2 0x32 +#define LOONGARCH_CSR_KS3 0x33 +#define LOONGARCH_CSR_KS4 0x34 +#define LOONGARCH_CSR_KS5 0x35 +#define LOONGARCH_CSR_KS6 0x36 +#define LOONGARCH_CSR_KS7 0x37 +#define LOONGARCH_CSR_KS8 0x38 + +/* Exception allocated KS0, KS1 and KS2 statically */ +#define EXCEPTION_KS0 LOONGARCH_CSR_KS0 +#define EXCEPTION_KS1 LOONGARCH_CSR_KS1 +#define EXCEPTION_KS2 LOONGARCH_CSR_KS2 +#define EXC_KSAVE_MASK (1 << 0 | 1 << 1 | 1 << 2) + +/* Percpu-data base allocated KS3 statically */ +#define PERCPU_BASE_KS LOONGARCH_CSR_KS3 +#define PERCPU_KSAVE_MASK (1 << 3) + +/* KVM allocated KS4 and KS5 statically */ +#define KVM_VCPU_KS LOONGARCH_CSR_KS4 +#define KVM_TEMP_KS LOONGARCH_CSR_KS5 +#define KVM_KSAVE_MASK (1 << 4 | 1 << 5) + +/* Timer registers */ +#define LOONGARCH_CSR_TMID 0x40 /* Timer ID */ + +#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */ +#define CSR_TCFG_VAL_SHIFT 2 +#define CSR_TCFG_VAL_WIDTH 48 +#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT) +#define CSR_TCFG_PERIOD_SHIFT 1 +#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT) +#define CSR_TCFG_EN (_ULCAST_(0x1)) + +#define LOONGARCH_CSR_TVAL 0x42 /* Timer value */ + +#define LOONGARCH_CSR_CNTC 0x43 /* Timer offset */ + +#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */ +#define CSR_TINTCLR_TI_SHIFT 0 +#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT) + +/* Guest registers */ +#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */ +#define CSR_GSTAT_GID_SHIFT 16 +#define CSR_GSTAT_GID_WIDTH 8 +#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT) +#define CSR_GSTAT_GIDBIT_SHIFT 4 +#define CSR_GSTAT_GIDBIT_WIDTH 6 +#define CSR_GSTAT_GIDBIT (_ULCAST_(0x3f) << CSR_GSTAT_GIDBIT_SHIFT) +#define CSR_GSTAT_PVM_SHIFT 1 +#define CSR_GSTAT_PVM (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT) +#define CSR_GSTAT_VM_SHIFT 0 +#define CSR_GSTAT_VM (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT) + +#define LOONGARCH_CSR_GCFG 0x51 /* Guest config */ +#define CSR_GCFG_GPERF_SHIFT 24 +#define CSR_GCFG_GPERF_WIDTH 3 +#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT) +#define 
CSR_GCFG_GCI_SHIFT 20 +#define CSR_GCFG_GCI_WIDTH 2 +#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_ALL (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_HIT (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_SECURE (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCIP_SHIFT 16 +#define CSR_GCFG_GCIP (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_ALL (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_HIT (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 1)) +#define CSR_GCFG_GCIP_SECURE (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 2)) +#define CSR_GCFG_TORU_SHIFT 15 +#define CSR_GCFG_TORU (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT) +#define CSR_GCFG_TORUP_SHIFT 14 +#define CSR_GCFG_TORUP (_ULCAST_(0x1) << CSR_GCFG_TORUP_SHIFT) +#define CSR_GCFG_TOP_SHIFT 13 +#define CSR_GCFG_TOP (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT) +#define CSR_GCFG_TOPP_SHIFT 12 +#define CSR_GCFG_TOPP (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT) +#define CSR_GCFG_TOE_SHIFT 11 +#define CSR_GCFG_TOE (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT) +#define CSR_GCFG_TOEP_SHIFT 10 +#define CSR_GCFG_TOEP (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT) +#define CSR_GCFG_TIT_SHIFT 9 +#define CSR_GCFG_TIT (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT) +#define CSR_GCFG_TITP_SHIFT 8 +#define CSR_GCFG_TITP (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT) +#define CSR_GCFG_SIT_SHIFT 7 +#define CSR_GCFG_SIT (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT) +#define CSR_GCFG_SITP_SHIFT 6 +#define CSR_GCFG_SITP (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT) +#define CSR_GCFG_MATC_SHITF 4 +#define CSR_GCFG_MATC_WIDTH 2 +#define CSR_GCFG_MATC_MASK (_ULCAST_(0x3) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) + +#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ +#define CSR_GINTC_HC_SHIFT 16 +#define CSR_GINTC_HC_WIDTH 8 +#define CSR_GINTC_HC (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT) +#define CSR_GINTC_PIP_SHIFT 8 +#define CSR_GINTC_PIP_WIDTH 8 +#define CSR_GINTC_PIP (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT) +#define CSR_GINTC_VIP_SHIFT 0 +#define CSR_GINTC_VIP_WIDTH 8 +#define CSR_GINTC_VIP (_ULCAST_(0xff)) + +#define LOONGARCH_CSR_GCNTC 0x53 /* Guest timer offset */ + +/* LLBCTL register */ +#define LOONGARCH_CSR_LLBCTL 0x60 /* LLBit control */ +#define CSR_LLBCTL_ROLLB_SHIFT 0 +#define CSR_LLBCTL_ROLLB (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT) +#define CSR_LLBCTL_WCLLB_SHIFT 1 +#define CSR_LLBCTL_WCLLB (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT) +#define CSR_LLBCTL_KLO_SHIFT 2 +#define CSR_LLBCTL_KLO (_ULCAST_(1) << CSR_LLBCTL_KLO_SHIFT) + +/* Implement dependent */ +#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */ +#define CSR_MISPEC_SHIFT 20 +#define CSR_MISPEC_WIDTH 8 +#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT) +#define CSR_SSEN_SHIFT 18 +#define CSR_SSEN (_ULCAST_(1) << CSR_SSEN_SHIFT) +#define CSR_SCRAND_SHIFT 17 +#define CSR_SCRAND (_ULCAST_(1) << CSR_SCRAND_SHIFT) +#define CSR_LLEXCL_SHIFT 16 +#define CSR_LLEXCL (_ULCAST_(1) << CSR_LLEXCL_SHIFT) +#define CSR_DISVC_SHIFT 15 +#define CSR_DISVC (_ULCAST_(1) << CSR_DISVC_SHIFT) +#define CSR_VCLRU_SHIFT 14 +#define CSR_VCLRU (_ULCAST_(1) << CSR_VCLRU_SHIFT) +#define CSR_DCLRU_SHIFT 13 +#define CSR_DCLRU (_ULCAST_(1) << CSR_DCLRU_SHIFT) +#define CSR_FASTLDQ_SHIFT 12 +#define CSR_FASTLDQ (_ULCAST_(1) << CSR_FASTLDQ_SHIFT) +#define CSR_USERCAC_SHIFT 11 +#define CSR_USERCAC 
(_ULCAST_(1) << CSR_USERCAC_SHIFT) +#define CSR_ANTI_MISPEC_SHIFT 10 +#define CSR_ANTI_MISPEC (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT) +#define CSR_AUTO_FLUSHSFB_SHIFT 9 +#define CSR_AUTO_FLUSHSFB (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT) +#define CSR_STFILL_SHIFT 8 +#define CSR_STFILL (_ULCAST_(1) << CSR_STFILL_SHIFT) +#define CSR_LIFEP_SHIFT 7 +#define CSR_LIFEP (_ULCAST_(1) << CSR_LIFEP_SHIFT) +#define CSR_LLSYNC_SHIFT 6 +#define CSR_LLSYNC (_ULCAST_(1) << CSR_LLSYNC_SHIFT) +#define CSR_BRBTDIS_SHIFT 5 +#define CSR_BRBTDIS (_ULCAST_(1) << CSR_BRBTDIS_SHIFT) +#define CSR_RASDIS_SHIFT 4 +#define CSR_RASDIS (_ULCAST_(1) << CSR_RASDIS_SHIFT) +#define CSR_STPRE_SHIFT 2 +#define CSR_STPRE_WIDTH 2 +#define CSR_STPRE (_ULCAST_(3) << CSR_STPRE_SHIFT) +#define CSR_INSTPRE_SHIFT 1 +#define CSR_INSTPRE (_ULCAST_(1) << CSR_INSTPRE_SHIFT) +#define CSR_DATAPRE_SHIFT 0 +#define CSR_DATAPRE (_ULCAST_(1) << CSR_DATAPRE_SHIFT) + +#define LOONGARCH_CSR_IMPCTL2 0x81 /* Loongson config2 */ +#define CSR_FLUSH_MTLB_SHIFT 0 +#define CSR_FLUSH_MTLB (_ULCAST_(1) << CSR_FLUSH_MTLB_SHIFT) +#define CSR_FLUSH_STLB_SHIFT 1 +#define CSR_FLUSH_STLB (_ULCAST_(1) << CSR_FLUSH_STLB_SHIFT) +#define CSR_FLUSH_DTLB_SHIFT 2 +#define CSR_FLUSH_DTLB (_ULCAST_(1) << CSR_FLUSH_DTLB_SHIFT) +#define CSR_FLUSH_ITLB_SHIFT 3 +#define CSR_FLUSH_ITLB (_ULCAST_(1) << CSR_FLUSH_ITLB_SHIFT) +#define CSR_FLUSH_BTAC_SHIFT 4 +#define CSR_FLUSH_BTAC (_ULCAST_(1) << CSR_FLUSH_BTAC_SHIFT) + +#define LOONGARCH_CSR_GNMI 0x82 + +/* TLB Refill registers */ +#define LOONGARCH_CSR_TLBRENTRY 0x88 /* TLB refill exception entry */ +#define LOONGARCH_CSR_TLBRBADV 0x89 /* TLB refill badvaddr */ +#define LOONGARCH_CSR_TLBRERA 0x8a /* TLB refill ERA */ +#define LOONGARCH_CSR_TLBRSAVE 0x8b /* KSave for TLB refill exception */ +#define LOONGARCH_CSR_TLBRELO0 0x8c /* TLB refill entrylo0 */ +#define LOONGARCH_CSR_TLBRELO1 0x8d /* TLB refill entrylo1 */ +#define LOONGARCH_CSR_TLBREHI 0x8e /* TLB refill entryhi */ +#define CSR_TLBREHI_PS_SHIFT 0 +#define CSR_TLBREHI_PS (_ULCAST_(0x3f) << CSR_TLBREHI_PS_SHIFT) +#define LOONGARCH_CSR_TLBRPRMD 0x8f /* TLB refill mode info */ + +/* Machine Error registers */ +#define LOONGARCH_CSR_MERRCTL 0x90 /* MERRCTL */ +#define LOONGARCH_CSR_MERRINFO1 0x91 /* MError info1 */ +#define LOONGARCH_CSR_MERRINFO2 0x92 /* MError info2 */ +#define LOONGARCH_CSR_MERRENTRY 0x93 /* MError exception entry */ +#define LOONGARCH_CSR_MERRERA 0x94 /* MError exception ERA */ +#define LOONGARCH_CSR_MERRSAVE 0x95 /* KSave for machine error exception */ + +#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ + +#define LOONGARCH_CSR_PRID 0xc0 + +/* Shadow MCSR : 0xc0 ~ 0xff */ +#define LOONGARCH_CSR_MCSR0 0xc0 /* CPUCFG0 and CPUCFG1 */ +#define MCSR0_INT_IMPL_SHIFT 58 +#define MCSR0_INT_IMPL 0 +#define MCSR0_IOCSR_BRD_SHIFT 57 +#define MCSR0_IOCSR_BRD (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT) +#define MCSR0_HUGEPG_SHIFT 56 +#define MCSR0_HUGEPG (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT) +#define MCSR0_RPLMTLB_SHIFT 55 +#define MCSR0_RPLMTLB (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT) +#define MCSR0_EP_SHIFT 54 +#define MCSR0_EP (_ULCAST_(1) << MCSR0_EP_SHIFT) +#define MCSR0_RI_SHIFT 53 +#define MCSR0_RI (_ULCAST_(1) << MCSR0_RI_SHIFT) +#define MCSR0_UAL_SHIFT 52 +#define MCSR0_UAL (_ULCAST_(1) << MCSR0_UAL_SHIFT) +#define MCSR0_VABIT_SHIFT 44 +#define MCSR0_VABIT_WIDTH 8 +#define MCSR0_VABIT (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT) +#define VABIT_DEFAULT 0x2f +#define MCSR0_PABIT_SHIFT 36 +#define MCSR0_PABIT_WIDTH 8 +#define MCSR0_PABIT (_ULCAST_(0xff) << 
MCSR0_PABIT_SHIFT) +#define PABIT_DEFAULT 0x2f +#define MCSR0_IOCSR_SHIFT 35 +#define MCSR0_IOCSR (_ULCAST_(1) << MCSR0_IOCSR_SHIFT) +#define MCSR0_PAGING_SHIFT 34 +#define MCSR0_PAGING (_ULCAST_(1) << MCSR0_PAGING_SHIFT) +#define MCSR0_GR64_SHIFT 33 +#define MCSR0_GR64 (_ULCAST_(1) << MCSR0_GR64_SHIFT) +#define GR64_DEFAULT 1 +#define MCSR0_GR32_SHIFT 32 +#define MCSR0_GR32 (_ULCAST_(1) << MCSR0_GR32_SHIFT) +#define GR32_DEFAULT 0 +#define MCSR0_PRID_WIDTH 32 +#define MCSR0_PRID 0x14C010 + +#define LOONGARCH_CSR_MCSR1 0xc1 /* CPUCFG2 and CPUCFG3 */ +#define MCSR1_HPFOLD_SHIFT 43 +#define MCSR1_HPFOLD (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT) +#define MCSR1_SPW_LVL_SHIFT 40 +#define MCSR1_SPW_LVL_WIDTH 3 +#define MCSR1_SPW_LVL (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT) +#define MCSR1_ICACHET_SHIFT 39 +#define MCSR1_ICACHET (_ULCAST_(1) << MCSR1_ICACHET_SHIFT) +#define MCSR1_ITLBT_SHIFT 38 +#define MCSR1_ITLBT (_ULCAST_(1) << MCSR1_ITLBT_SHIFT) +#define MCSR1_LLDBAR_SHIFT 37 +#define MCSR1_LLDBAR (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT) +#define MCSR1_SCDLY_SHIFT 36 +#define MCSR1_SCDLY (_ULCAST_(1) << MCSR1_SCDLY_SHIFT) +#define MCSR1_LLEXC_SHIFT 35 +#define MCSR1_LLEXC (_ULCAST_(1) << MCSR1_LLEXC_SHIFT) +#define MCSR1_UCACC_SHIFT 34 +#define MCSR1_UCACC (_ULCAST_(1) << MCSR1_UCACC_SHIFT) +#define MCSR1_SFB_SHIFT 33 +#define MCSR1_SFB (_ULCAST_(1) << MCSR1_SFB_SHIFT) +#define MCSR1_CCDMA_SHIFT 32 +#define MCSR1_CCDMA (_ULCAST_(1) << MCSR1_CCDMA_SHIFT) +#define MCSR1_LAMO_SHIFT 22 +#define MCSR1_LAMO (_ULCAST_(1) << MCSR1_LAMO_SHIFT) +#define MCSR1_LSPW_SHIFT 21 +#define MCSR1_LSPW (_ULCAST_(1) << MCSR1_LSPW_SHIFT) +#define MCSR1_MIPSBT_SHIFT 20 +#define MCSR1_MIPSBT (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT) +#define MCSR1_ARMBT_SHIFT 19 +#define MCSR1_ARMBT (_ULCAST_(1) << MCSR1_ARMBT_SHIFT) +#define MCSR1_X86BT_SHIFT 18 +#define MCSR1_X86BT (_ULCAST_(1) << MCSR1_X86BT_SHIFT) +#define MCSR1_LLFTPVERS_SHIFT 15 +#define MCSR1_LLFTPVERS_WIDTH 3 +#define MCSR1_LLFTPVERS (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT) +#define MCSR1_LLFTP_SHIFT 14 +#define MCSR1_LLFTP (_ULCAST_(1) << MCSR1_LLFTP_SHIFT) +#define MCSR1_VZVERS_SHIFT 11 +#define MCSR1_VZVERS_WIDTH 3 +#define MCSR1_VZVERS (_ULCAST_(7) << MCSR1_VZVERS_SHIFT) +#define MCSR1_VZ_SHIFT 10 +#define MCSR1_VZ (_ULCAST_(1) << MCSR1_VZ_SHIFT) +#define MCSR1_CRYPTO_SHIFT 9 +#define MCSR1_CRYPTO (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT) +#define MCSR1_COMPLEX_SHIFT 8 +#define MCSR1_COMPLEX (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT) +#define MCSR1_LASX_SHIFT 7 +#define MCSR1_LASX (_ULCAST_(1) << MCSR1_LASX_SHIFT) +#define MCSR1_LSX_SHIFT 6 +#define MCSR1_LSX (_ULCAST_(1) << MCSR1_LSX_SHIFT) +#define MCSR1_FPVERS_SHIFT 3 +#define MCSR1_FPVERS_WIDTH 3 +#define MCSR1_FPVERS (_ULCAST_(7) << MCSR1_FPVERS_SHIFT) +#define MCSR1_FPDP_SHIFT 2 +#define MCSR1_FPDP (_ULCAST_(1) << MCSR1_FPDP_SHIFT) +#define MCSR1_FPSP_SHIFT 1 +#define MCSR1_FPSP (_ULCAST_(1) << MCSR1_FPSP_SHIFT) +#define MCSR1_FP_SHIFT 0 +#define MCSR1_FP (_ULCAST_(1) << MCSR1_FP_SHIFT) + +#define LOONGARCH_CSR_MCSR2 0xc2 /* CPUCFG4 and CPUCFG5 */ +#define MCSR2_CCDIV_SHIFT 48 +#define MCSR2_CCDIV_WIDTH 16 +#define MCSR2_CCDIV (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT) +#define MCSR2_CCMUL_SHIFT 32 +#define MCSR2_CCMUL_WIDTH 16 +#define MCSR2_CCMUL (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT) +#define MCSR2_CCFREQ_WIDTH 32 +#define MCSR2_CCFREQ (_ULCAST_(0xffffffff)) +#define CCFREQ_DEFAULT 0x5f5e100 /* 100MHz */ + +#define LOONGARCH_CSR_MCSR3 0xc3 /* CPUCFG6 */ +#define MCSR3_UPM_SHIFT 14 +#define MCSR3_UPM (_ULCAST_(1) << MCSR3_UPM_SHIFT) 
+#define MCSR3_PMBITS_SHIFT 8 +#define MCSR3_PMBITS_WIDTH 6 +#define MCSR3_PMBITS (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT) +#define PMBITS_DEFAULT 0x40 +#define MCSR3_PMNUM_SHIFT 4 +#define MCSR3_PMNUM_WIDTH 4 +#define MCSR3_PMNUM (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT) +#define MCSR3_PAMVER_SHIFT 1 +#define MCSR3_PAMVER_WIDTH 3 +#define MCSR3_PAMVER (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT) +#define MCSR3_PMP_SHIFT 0 +#define MCSR3_PMP (_ULCAST_(1) << MCSR3_PMP_SHIFT) + +#define LOONGARCH_CSR_MCSR8 0xc8 /* CPUCFG16 and CPUCFG17 */ +#define MCSR8_L1I_SIZE_SHIFT 56 +#define MCSR8_L1I_SIZE_WIDTH 7 +#define MCSR8_L1I_SIZE (_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT) +#define MCSR8_L1I_IDX_SHIFT 48 +#define MCSR8_L1I_IDX_WIDTH 8 +#define MCSR8_L1I_IDX (_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT) +#define MCSR8_L1I_WAY_SHIFT 32 +#define MCSR8_L1I_WAY_WIDTH 16 +#define MCSR8_L1I_WAY (_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT) +#define MCSR8_L3DINCL_SHIFT 16 +#define MCSR8_L3DINCL (_ULCAST_(1) << MCSR8_L3DINCL_SHIFT) +#define MCSR8_L3DPRIV_SHIFT 15 +#define MCSR8_L3DPRIV (_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT) +#define MCSR8_L3DPRE_SHIFT 14 +#define MCSR8_L3DPRE (_ULCAST_(1) << MCSR8_L3DPRE_SHIFT) +#define MCSR8_L3IUINCL_SHIFT 13 +#define MCSR8_L3IUINCL (_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT) +#define MCSR8_L3IUPRIV_SHIFT 12 +#define MCSR8_L3IUPRIV (_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT) +#define MCSR8_L3IUUNIFY_SHIFT 11 +#define MCSR8_L3IUUNIFY (_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT) +#define MCSR8_L3IUPRE_SHIFT 10 +#define MCSR8_L3IUPRE (_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT) +#define MCSR8_L2DINCL_SHIFT 9 +#define MCSR8_L2DINCL (_ULCAST_(1) << MCSR8_L2DINCL_SHIFT) +#define MCSR8_L2DPRIV_SHIFT 8 +#define MCSR8_L2DPRIV (_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT) +#define MCSR8_L2DPRE_SHIFT 7 +#define MCSR8_L2DPRE (_ULCAST_(1) << MCSR8_L2DPRE_SHIFT) +#define MCSR8_L2IUINCL_SHIFT 6 +#define MCSR8_L2IUINCL (_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT) +#define MCSR8_L2IUPRIV_SHIFT 5 +#define MCSR8_L2IUPRIV (_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT) +#define MCSR8_L2IUUNIFY_SHIFT 4 +#define MCSR8_L2IUUNIFY (_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT) +#define MCSR8_L2IUPRE_SHIFT 3 +#define MCSR8_L2IUPRE (_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT) +#define MCSR8_L1DPRE_SHIFT 2 +#define MCSR8_L1DPRE (_ULCAST_(1) << MCSR8_L1DPRE_SHIFT) +#define MCSR8_L1IUUNIFY_SHIFT 1 +#define MCSR8_L1IUUNIFY (_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT) +#define MCSR8_L1IUPRE_SHIFT 0 +#define MCSR8_L1IUPRE (_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT) + +#define LOONGARCH_CSR_MCSR9 0xc9 /* CPUCFG18 and CPUCFG19 */ +#define MCSR9_L2U_SIZE_SHIFT 56 +#define MCSR9_L2U_SIZE_WIDTH 7 +#define MCSR9_L2U_SIZE (_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT) +#define MCSR9_L2U_IDX_SHIFT 48 +#define MCSR9_L2U_IDX_WIDTH 8 +#define MCSR9_L2U_IDX (_ULCAST_(0xff) << MCSR9_IDX_LOG_SHIFT) +#define MCSR9_L2U_WAY_SHIFT 32 +#define MCSR9_L2U_WAY_WIDTH 16 +#define MCSR9_L2U_WAY (_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT) +#define MCSR9_L1D_SIZE_SHIFT 24 +#define MCSR9_L1D_SIZE_WIDTH 7 +#define MCSR9_L1D_SIZE (_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT) +#define MCSR9_L1D_IDX_SHIFT 16 +#define MCSR9_L1D_IDX_WIDTH 8 +#define MCSR9_L1D_IDX (_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT) +#define MCSR9_L1D_WAY_SHIFT 0 +#define MCSR9_L1D_WAY_WIDTH 16 +#define MCSR9_L1D_WAY (_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT) + +#define LOONGARCH_CSR_MCSR10 0xca /* CPUCFG20 */ +#define MCSR10_L3U_SIZE_SHIFT 24 +#define MCSR10_L3U_SIZE_WIDTH 7 +#define MCSR10_L3U_SIZE (_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT) +#define MCSR10_L3U_IDX_SHIFT 16 +#define 
MCSR10_L3U_IDX_WIDTH 8 +#define MCSR10_L3U_IDX (_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT) +#define MCSR10_L3U_WAY_SHIFT 0 +#define MCSR10_L3U_WAY_WIDTH 16 +#define MCSR10_L3U_WAY (_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT) + +#define LOONGARCH_CSR_MCSR24 0xf0 /* cpucfg48 */ +#define MCSR24_RAMCG_SHIFT 3 +#define MCSR24_RAMCG (_ULCAST_(1) << MCSR24_RAMCG_SHIFT) +#define MCSR24_VFPUCG_SHIFT 2 +#define MCSR24_VFPUCG (_ULCAST_(1) << MCSR24_VFPUCG_SHIFT) +#define MCSR24_NAPEN_SHIFT 1 +#define MCSR24_NAPEN (_ULCAST_(1) << MCSR24_NAPEN_SHIFT) +#define MCSR24_MCSRLOCK_SHIFT 0 +#define MCSR24_MCSRLOCK (_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT) + +/* Uncached accelerate windows registers */ +#define LOONGARCH_CSR_UCAWIN 0x100 +#define LOONGARCH_CSR_UCAWIN0_LO 0x102 +#define LOONGARCH_CSR_UCAWIN0_HI 0x103 +#define LOONGARCH_CSR_UCAWIN1_LO 0x104 +#define LOONGARCH_CSR_UCAWIN1_HI 0x105 +#define LOONGARCH_CSR_UCAWIN2_LO 0x106 +#define LOONGARCH_CSR_UCAWIN2_HI 0x107 +#define LOONGARCH_CSR_UCAWIN3_LO 0x108 +#define LOONGARCH_CSR_UCAWIN3_HI 0x109 + +/* Direct Map windows registers */ +#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */ +#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */ +#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */ +#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ + +/* Direct Map window 0/1 */ +#define CSR_DMW0_PLV0 _CONST64_(1 << 0) +#define CSR_DMW0_VSEG _CONST64_(0x8000) +#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) +#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0) + +#define CSR_DMW1_PLV0 _CONST64_(1 << 0) +#define CSR_DMW1_MAT _CONST64_(1 << 4) +#define CSR_DMW1_VSEG _CONST64_(0x9000) +#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) +#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) + +/* Performance Counter registers */ +#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ +#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ +#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */ +#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */ +#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */ +#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */ +#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */ +#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */ +#define CSR_PERFCTRL_PLV0 (_ULCAST_(1) << 16) +#define CSR_PERFCTRL_PLV1 (_ULCAST_(1) << 17) +#define CSR_PERFCTRL_PLV2 (_ULCAST_(1) << 18) +#define CSR_PERFCTRL_PLV3 (_ULCAST_(1) << 19) +#define CSR_PERFCTRL_IE (_ULCAST_(1) << 20) +#define CSR_PERFCTRL_EVENT 0x3ff + +/* Debug registers */ +#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */ +#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */ + +#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */ +#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */ +#define LOONGARCH_CSR_DB0CTRL 0x312 /* data breakpoint 0 control */ +#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */ + +#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */ +#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */ +#define LOONGARCH_CSR_DB1CTRL 0x31a /* data breakpoint 1 control */ +#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */ + +#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */ +#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */ +#define LOONGARCH_CSR_DB2CTRL 
0x322 /* data breakpoint 2 control */ +#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */ + +#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */ +#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */ +#define LOONGARCH_CSR_DB3CTRL 0x32a /* data breakpoint 3 control */ +#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */ + +#define LOONGARCH_CSR_DB4ADDR 0x330 /* data breakpoint 4 address */ +#define LOONGARCH_CSR_DB4MASK 0x331 /* data breakpoint 4 maks */ +#define LOONGARCH_CSR_DB4CTRL 0x332 /* data breakpoint 4 control */ +#define LOONGARCH_CSR_DB4ASID 0x333 /* data breakpoint 4 asid */ + +#define LOONGARCH_CSR_DB5ADDR 0x338 /* data breakpoint 5 address */ +#define LOONGARCH_CSR_DB5MASK 0x339 /* data breakpoint 5 mask */ +#define LOONGARCH_CSR_DB5CTRL 0x33a /* data breakpoint 5 control */ +#define LOONGARCH_CSR_DB5ASID 0x33b /* data breakpoint 5 asid */ + +#define LOONGARCH_CSR_DB6ADDR 0x340 /* data breakpoint 6 address */ +#define LOONGARCH_CSR_DB6MASK 0x341 /* data breakpoint 6 mask */ +#define LOONGARCH_CSR_DB6CTRL 0x342 /* data breakpoint 6 control */ +#define LOONGARCH_CSR_DB6ASID 0x343 /* data breakpoint 6 asid */ + +#define LOONGARCH_CSR_DB7ADDR 0x348 /* data breakpoint 7 address */ +#define LOONGARCH_CSR_DB7MASK 0x349 /* data breakpoint 7 mask */ +#define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */ +#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */ + +#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */ +#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */ + +#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */ +#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */ +#define LOONGARCH_CSR_IB0CTRL 0x392 /* inst breakpoint 0 control */ +#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */ + +#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */ +#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */ +#define LOONGARCH_CSR_IB1CTRL 0x39a /* inst breakpoint 1 control */ +#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */ + +#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */ +#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */ +#define LOONGARCH_CSR_IB2CTRL 0x3a2 /* inst breakpoint 2 control */ +#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */ + +#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */ +#define LOONGARCH_CSR_IB3MASK 0x3a9 /* breakpoint 3 mask */ +#define LOONGARCH_CSR_IB3CTRL 0x3aa /* inst breakpoint 3 control */ +#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */ + +#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */ +#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */ +#define LOONGARCH_CSR_IB4CTRL 0x3b2 /* inst breakpoint 4 control */ +#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */ + +#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */ +#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */ +#define LOONGARCH_CSR_IB5CTRL 0x3ba /* inst breakpoint 5 control */ +#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */ + +#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */ +#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */ +#define LOONGARCH_CSR_IB6CTRL 0x3c2 /* inst breakpoint 6 control */ +#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */ + +#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst 
breakpoint 7 address */ +#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */ +#define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */ +#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */ + +#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */ +#define LOONGARCH_CSR_DERA 0x501 /* debug era */ +#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */ + +#define CSR_FWPC_SKIP_SHIFT 16 +#define CSR_FWPC_SKIP (_ULCAST_(1) << CSR_FWPC_SKIP_SHIFT) + +/* + * CSR_ECFG IM + */ +#define ECFG0_IM 0x00001fff +#define ECFGB_SIP0 0 +#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) +#define ECFGB_SIP1 1 +#define ECFGF_SIP1 (_ULCAST_(1) << ECFGB_SIP1) +#define ECFGB_IP0 2 +#define ECFGF_IP0 (_ULCAST_(1) << ECFGB_IP0) +#define ECFGB_IP1 3 +#define ECFGF_IP1 (_ULCAST_(1) << ECFGB_IP1) +#define ECFGB_IP2 4 +#define ECFGF_IP2 (_ULCAST_(1) << ECFGB_IP2) +#define ECFGB_IP3 5 +#define ECFGF_IP3 (_ULCAST_(1) << ECFGB_IP3) +#define ECFGB_IP4 6 +#define ECFGF_IP4 (_ULCAST_(1) << ECFGB_IP4) +#define ECFGB_IP5 7 +#define ECFGF_IP5 (_ULCAST_(1) << ECFGB_IP5) +#define ECFGB_IP6 8 +#define ECFGF_IP6 (_ULCAST_(1) << ECFGB_IP6) +#define ECFGB_IP7 9 +#define ECFGF_IP7 (_ULCAST_(1) << ECFGB_IP7) +#define ECFGB_PMC 10 +#define ECFGF_PMC (_ULCAST_(1) << ECFGB_PMC) +#define ECFGB_TIMER 11 +#define ECFGF_TIMER (_ULCAST_(1) << ECFGB_TIMER) +#define ECFGB_IPI 12 +#define ECFGF_IPI (_ULCAST_(1) << ECFGB_IPI) +#define ECFGF(hwirq) (_ULCAST_(1) << hwirq) + +#define ESTATF_IP 0x00003fff + +#define LOONGARCH_IOCSR_FEATURES 0x8 +#define IOCSRF_TEMP BIT_ULL(0) +#define IOCSRF_NODECNT BIT_ULL(1) +#define IOCSRF_MSI BIT_ULL(2) +#define IOCSRF_EXTIOI BIT_ULL(3) +#define IOCSRF_CSRIPI BIT_ULL(4) +#define IOCSRF_FREQCSR BIT_ULL(5) +#define IOCSRF_FREQSCALE BIT_ULL(6) +#define IOCSRF_DVFSV1 BIT_ULL(7) +#define IOCSRF_EIODECODE BIT_ULL(9) +#define IOCSRF_FLATMODE BIT_ULL(10) +#define IOCSRF_VM BIT_ULL(11) + +#define LOONGARCH_IOCSR_VENDOR 0x10 + +#define LOONGARCH_IOCSR_CPUNAME 0x20 + +#define LOONGARCH_IOCSR_NODECNT 0x408 + +#define LOONGARCH_IOCSR_MISC_FUNC 0x420 +#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) +#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) + +#define LOONGARCH_IOCSR_CPUTEMP 0x428 + +/* PerCore CSR, only accessible by local cores */ +#define LOONGARCH_IOCSR_IPI_STATUS 0x1000 +#define LOONGARCH_IOCSR_IPI_EN 0x1004 +#define LOONGARCH_IOCSR_IPI_SET 0x1008 +#define LOONGARCH_IOCSR_IPI_CLEAR 0x100c +#define LOONGARCH_IOCSR_MBUF0 0x1020 +#define LOONGARCH_IOCSR_MBUF1 0x1028 +#define LOONGARCH_IOCSR_MBUF2 0x1030 +#define LOONGARCH_IOCSR_MBUF3 0x1038 + +#define LOONGARCH_IOCSR_IPI_SEND 0x1040 +#define IOCSR_IPI_SEND_IP_SHIFT 0 +#define IOCSR_IPI_SEND_CPU_SHIFT 16 +#define IOCSR_IPI_SEND_BLOCKING BIT(31) + +#define LOONGARCH_IOCSR_MBUF_SEND 0x1048 +#define IOCSR_MBUF_SEND_BLOCKING BIT_ULL(31) +#define IOCSR_MBUF_SEND_BOX_SHIFT 2 +#define IOCSR_MBUF_SEND_BOX_LO(box) (box << 1) +#define IOCSR_MBUF_SEND_BOX_HI(box) ((box << 1) + 1) +#define IOCSR_MBUF_SEND_CPU_SHIFT 16 +#define IOCSR_MBUF_SEND_BUF_SHIFT 32 +#define IOCSR_MBUF_SEND_H32_MASK 0xFFFFFFFF00000000ULL + +#define LOONGARCH_IOCSR_ANY_SEND 0x1158 +#define IOCSR_ANY_SEND_BLOCKING BIT_ULL(31) +#define IOCSR_ANY_SEND_CPU_SHIFT 16 +#define IOCSR_ANY_SEND_MASK_SHIFT 27 +#define IOCSR_ANY_SEND_BUF_SHIFT 32 +#define IOCSR_ANY_SEND_H32_MASK 0xFFFFFFFF00000000ULL + +/* Register offset and bit definition for CSR access */ +#define LOONGARCH_IOCSR_TIMER_CFG 0x1060 +#define LOONGARCH_IOCSR_TIMER_TICK 0x1070 +#define IOCSR_TIMER_CFG_RESERVED (_ULCAST_(1) << 63) 
+#define IOCSR_TIMER_CFG_PERIODIC (_ULCAST_(1) << 62) +#define IOCSR_TIMER_CFG_EN (_ULCAST_(1) << 61) +#define IOCSR_TIMER_MASK 0x0ffffffffffffULL +#define IOCSR_TIMER_INITVAL_RST (_ULCAST_(0xffff) << 48) + +#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE 0x14a0 +#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE 0x14c0 +#define LOONGARCH_IOCSR_EXTIOI_EN_BASE 0x1600 +#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE 0x1680 +#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE 0x1800 +#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00 +#define IOCSR_EXTIOI_VECTOR_NUM 256 + +#ifndef __ASSEMBLY__ + +static __always_inline u64 drdtime(void) +{ + int rID = 0; + u64 val = 0; + + __asm__ __volatile__( + "rdtime.d %0, %1 \n\t" + : "=r"(val), "=r"(rID) + : + ); + return val; +} + +static inline unsigned int get_csr_cpuid(void) +{ + return csr_read32(LOONGARCH_CSR_CPUID); +} + +static inline void csr_any_send(unsigned int addr, unsigned int data, + unsigned int data_mask, unsigned int cpu) +{ + uint64_t val = 0; + + val = IOCSR_ANY_SEND_BLOCKING | addr; + val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT); + val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT); + val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT); + iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND); +} + +static inline unsigned int read_csr_excode(void) +{ + return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; +} + +static inline void write_csr_index(unsigned int idx) +{ + csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX); +} + +static inline unsigned int read_csr_pagesize(void) +{ + return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE; +} + +static inline void write_csr_pagesize(unsigned int size) +{ + csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX); +} + +static inline unsigned int read_csr_tlbrefill_pagesize(void) +{ + return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT; +} + +static inline void write_csr_tlbrefill_pagesize(unsigned int size) +{ + csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI); +} + +#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID) +#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID) +#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI) +#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI) +#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0) +#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0) +#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1) +#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1) +#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG) +#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG) +#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT) +#define write_csr_estat(val) csr_write32(val, LOONGARCH_CSR_ESTAT) +#define read_csr_tlbidx() csr_read32(LOONGARCH_CSR_TLBIDX) +#define write_csr_tlbidx(val) csr_write32(val, LOONGARCH_CSR_TLBIDX) +#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN) +#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN) +#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID) +#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1) +#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1) +#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2) +#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2) +#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3) +#define write_csr_prcfg3(val) csr_write64(val, 
LOONGARCH_CSR_PRCFG3) +#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE) +#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG) +#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG) +#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR) +#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2) + +#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0) +#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0) +#define read_csr_perfctrl1() csr_read64(LOONGARCH_CSR_PERFCTRL1) +#define read_csr_perfcntr1() csr_read64(LOONGARCH_CSR_PERFCNTR1) +#define read_csr_perfctrl2() csr_read64(LOONGARCH_CSR_PERFCTRL2) +#define read_csr_perfcntr2() csr_read64(LOONGARCH_CSR_PERFCNTR2) +#define read_csr_perfctrl3() csr_read64(LOONGARCH_CSR_PERFCTRL3) +#define read_csr_perfcntr3() csr_read64(LOONGARCH_CSR_PERFCNTR3) +#define write_csr_perfctrl0(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL0) +#define write_csr_perfcntr0(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR0) +#define write_csr_perfctrl1(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL1) +#define write_csr_perfcntr1(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR1) +#define write_csr_perfctrl2(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL2) +#define write_csr_perfcntr2(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR2) +#define write_csr_perfctrl3(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL3) +#define write_csr_perfcntr3(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR3) + +/* + * Manipulate bits in a register. + */ +#define __BUILD_CSR_COMMON(name) \ +static inline unsigned long \ +set_##name(unsigned long set) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res | set; \ + write_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned long \ +clear_##name(unsigned long clear) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res & ~clear; \ + write_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned long \ +change_##name(unsigned long change, unsigned long val) \ +{ \ + unsigned long res, new; \ + \ + res = read_##name(); \ + new = res & ~change; \ + new |= (val & change); \ + write_##name(new); \ + \ + return res; \ +} + +#define __BUILD_CSR_OP(name) __BUILD_CSR_COMMON(csr_##name) + +__BUILD_CSR_OP(euen) +__BUILD_CSR_OP(ecfg) +__BUILD_CSR_OP(tlbidx) + +#define set_csr_estat(val) \ + csr_xchg32(val, val, LOONGARCH_CSR_ESTAT) +#define clear_csr_estat(val) \ + csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT) + +#endif /* __ASSEMBLY__ */ + +/* Generic EntryLo bit definitions */ +#define ENTRYLO_V (_ULCAST_(1) << 0) +#define ENTRYLO_D (_ULCAST_(1) << 1) +#define ENTRYLO_PLV_SHIFT 2 +#define ENTRYLO_PLV (_ULCAST_(3) << ENTRYLO_PLV_SHIFT) +#define ENTRYLO_C_SHIFT 4 +#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT) +#define ENTRYLO_G (_ULCAST_(1) << 6) +#define ENTRYLO_NR (_ULCAST_(1) << 61) +#define ENTRYLO_NX (_ULCAST_(1) << 62) + +/* Values for PageSize register */ +#define PS_4K 0x0000000c +#define PS_8K 0x0000000d +#define PS_16K 0x0000000e +#define PS_32K 0x0000000f +#define PS_64K 0x00000010 +#define PS_128K 0x00000011 +#define PS_256K 0x00000012 +#define PS_512K 0x00000013 +#define PS_1M 0x00000014 +#define PS_2M 0x00000015 +#define PS_4M 0x00000016 +#define PS_8M 0x00000017 +#define PS_16M 0x00000018 
+#define PS_32M 0x00000019 +#define PS_64M 0x0000001a +#define PS_128M 0x0000001b +#define PS_256M 0x0000001c +#define PS_512M 0x0000001d +#define PS_1G 0x0000001e + +/* Default page size for a given kernel configuration */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PS_DEFAULT_SIZE PS_4K +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PS_DEFAULT_SIZE PS_16K +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PS_DEFAULT_SIZE PS_64K +#else +#error Bad page size configuration! +#endif + +/* Default huge tlb size for a given kernel configuration */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PS_HUGE_SIZE PS_1M +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PS_HUGE_SIZE PS_16M +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PS_HUGE_SIZE PS_256M +#else +#error Bad page size configuration for hugetlbfs! +#endif + +/* ExStatus.ExcCode */ +#define EXCCODE_RSV 0 /* Reserved */ +#define EXCCODE_TLBL 1 /* TLB miss on a load */ +#define EXCCODE_TLBS 2 /* TLB miss on a store */ +#define EXCCODE_TLBI 3 /* TLB miss on a ifetch */ +#define EXCCODE_TLBM 4 /* TLB modified fault */ +#define EXCCODE_TLBNR 5 /* TLB Read-Inhibit exception */ +#define EXCCODE_TLBNX 6 /* TLB Execution-Inhibit exception */ +#define EXCCODE_TLBPE 7 /* TLB Privilege Error */ +#define EXCCODE_ADE 8 /* Address Error */ + #define EXSUBCODE_ADEF 0 /* Fetch Instruction */ + #define EXSUBCODE_ADEM 1 /* Access Memory*/ +#define EXCCODE_ALE 9 /* Unalign Access */ +#define EXCCODE_BCE 10 /* Bounds Check Error */ +#define EXCCODE_SYS 11 /* System call */ +#define EXCCODE_BP 12 /* Breakpoint */ +#define EXCCODE_INE 13 /* Inst. Not Exist */ +#define EXCCODE_IPE 14 /* Inst. Privileged Error */ +#define EXCCODE_FPDIS 15 /* FPU Disabled */ +#define EXCCODE_LSXDIS 16 /* LSX Disabled */ +#define EXCCODE_LASXDIS 17 /* LASX Disabled */ +#define EXCCODE_FPE 18 /* Floating Point Exception */ + #define EXCSUBCODE_FPE 0 /* Floating Point Exception */ + #define EXCSUBCODE_VFPE 1 /* Vector Exception */ +#define EXCCODE_WATCH 19 /* WatchPoint Exception */ + #define EXCSUBCODE_WPEF 0 /* ... on Instruction Fetch */ + #define EXCSUBCODE_WPEM 1 /* ... on Memory Accesses */ +#define EXCCODE_BTDIS 20 /* Binary Trans. Disabled */ +#define EXCCODE_BTE 21 /* Binary Trans. 
Exception */ +#define EXCCODE_GSPR 22 /* Guest Privileged Error */ +#define EXCCODE_HVC 23 /* Hypercall */ +#define EXCCODE_GCM 24 /* Guest CSR modified */ + #define EXCSUBCODE_GCSC 0 /* Software caused */ + #define EXCSUBCODE_GCHC 1 /* Hardware caused */ +#define EXCCODE_SE 25 /* Security */ + +/* Interrupt numbers */ +#define INT_SWI0 0 /* Software Interrupts */ +#define INT_SWI1 1 +#define INT_HWI0 2 /* Hardware Interrupts */ +#define INT_HWI1 3 +#define INT_HWI2 4 +#define INT_HWI3 5 +#define INT_HWI4 6 +#define INT_HWI5 7 +#define INT_HWI6 8 +#define INT_HWI7 9 +#define INT_PCOV 10 /* Performance Counter Overflow */ +#define INT_TI 11 /* Timer */ +#define INT_IPI 12 +#define INT_NMI 13 + +/* ExcCodes corresponding to interrupts */ +#define EXCCODE_INT_NUM (INT_NMI + 1) +#define EXCCODE_INT_START 64 +#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1) + +/* FPU Status Register Names */ +#ifndef CONFIG_AS_HAS_FCSR_CLASS +#define LOONGARCH_FCSR0 $r0 +#define LOONGARCH_FCSR1 $r1 +#define LOONGARCH_FCSR2 $r2 +#define LOONGARCH_FCSR3 $r3 +#else +#define LOONGARCH_FCSR0 $fcsr0 +#define LOONGARCH_FCSR1 $fcsr1 +#define LOONGARCH_FCSR2 $fcsr2 +#define LOONGARCH_FCSR3 $fcsr3 +#endif + +/* FPU Status Register Values */ +#define FPU_CSR_RSVD 0xe0e0fce0 + +/* + * X the exception cause indicator + * E the exception enable + * S the sticky/flag bit + */ +#define FPU_CSR_ALL_X 0x1f000000 +#define FPU_CSR_INV_X 0x10000000 +#define FPU_CSR_DIV_X 0x08000000 +#define FPU_CSR_OVF_X 0x04000000 +#define FPU_CSR_UDF_X 0x02000000 +#define FPU_CSR_INE_X 0x01000000 + +#define FPU_CSR_ALL_S 0x001f0000 +#define FPU_CSR_INV_S 0x00100000 +#define FPU_CSR_DIV_S 0x00080000 +#define FPU_CSR_OVF_S 0x00040000 +#define FPU_CSR_UDF_S 0x00020000 +#define FPU_CSR_INE_S 0x00010000 + +#define FPU_CSR_ALL_E 0x0000001f +#define FPU_CSR_INV_E 0x00000010 +#define FPU_CSR_DIV_E 0x00000008 +#define FPU_CSR_OVF_E 0x00000004 +#define FPU_CSR_UDF_E 0x00000002 +#define FPU_CSR_INE_E 0x00000001 + +/* Bits 8 and 9 of FPU Status Register specify the rounding mode */ +#define FPU_CSR_RM 0x300 +#define FPU_CSR_RN 0x000 /* nearest */ +#define FPU_CSR_RZ 0x100 /* towards zero */ +#define FPU_CSR_RU 0x200 /* towards +Infinity */ +#define FPU_CSR_RD 0x300 /* towards -Infinity */ + +/* Bit 6 of FPU Status Register specify the LBT TOP simulation mode */ +#define FPU_CSR_TM_SHIFT 0x6 +#define FPU_CSR_TM (_ULCAST_(1) << FPU_CSR_TM_SHIFT) + +#define read_fcsr(source) \ +({ \ + unsigned int __res; \ +\ + __asm__ __volatile__( \ + " movfcsr2gr %0, "__stringify(source)" \n" \ + : "=r" (__res)); \ + __res; \ +}) + +#define write_fcsr(dest, val) \ +do { \ + __asm__ __volatile__( \ + " movgr2fcsr "__stringify(dest)", %0 \n" \ + : : "r" (val)); \ +} while (0) + +#endif /* _ASM_LOONGARCH_H */ diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h new file mode 100644 index 0000000000..12494cffff --- /dev/null +++ b/arch/loongarch/include/asm/loongson.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGSON_H +#define __ASM_LOONGSON_H + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <asm/addrspace.h> +#include <asm/bootinfo.h> + +#define LOONGSON_REG(x) \ + (*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x))) + +#define LOONGSON_LIO_BASE 0x18000000 +#define LOONGSON_LIO_SIZE 0x00100000 /* 
1M */ +#define LOONGSON_LIO_TOP (LOONGSON_LIO_BASE+LOONGSON_LIO_SIZE-1) + +#define LOONGSON_BOOT_BASE 0x1c000000 +#define LOONGSON_BOOT_SIZE 0x02000000 /* 32M */ +#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1) + +#define LOONGSON_REG_BASE 0x1fe00000 +#define LOONGSON_REG_SIZE 0x00100000 /* 1M */ +#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1) + +/* GPIO Regs - r/w */ + +#define LOONGSON_GPIODATA LOONGSON_REG(0x11c) +#define LOONGSON_GPIOIE LOONGSON_REG(0x120) +#define LOONGSON_REG_GPIO_BASE (LOONGSON_REG_BASE + 0x11c) + +#define MAX_PACKAGES 16 + +#define xconf_readl(addr) readl(addr) +#define xconf_readq(addr) readq(addr) + +static inline void xconf_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile ( + " st.w %[v], %[hw], 0 \n" + " ld.b $zero, %[hw], 0 \n" + : + : [hw] "r" (addr), [v] "r" (val) + ); +} + +static inline void xconf_writeq(u64 val64, volatile void __iomem *addr) +{ + asm volatile ( + " st.d %[v], %[hw], 0 \n" + " ld.b $zero, %[hw], 0 \n" + : + : [hw] "r" (addr), [v] "r" (val64) + ); +} + +/* ============== LS7A registers =============== */ +#define LS7A_PCH_REG_BASE 0x10000000UL +/* LPC regs */ +#define LS7A_LPC_REG_BASE (LS7A_PCH_REG_BASE + 0x00002000) +/* CHIPCFG regs */ +#define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000) +/* MISC reg base */ +#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000) +/* ACPI regs */ +#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000) +/* RTC regs */ +#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100) + +#define LS7A_DMA_CFG (volatile void *)TO_UNCACHE(LS7A_CHIPCFG_REG_BASE + 0x041c) +#define LS7A_DMA_NODE_SHF 8 +#define LS7A_DMA_NODE_MASK 0x1F00 + +#define LS7A_INT_MASK_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x020) +#define LS7A_INT_EDGE_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x060) +#define LS7A_INT_CLEAR_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x080) +#define LS7A_INT_HTMSI_EN_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x040) +#define LS7A_INT_ROUTE_ENTRY_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x100) +#define LS7A_INT_HTMSI_VEC_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200) +#define LS7A_INT_STATUS_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3a0) +#define LS7A_INT_POL_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3e0) +#define LS7A_LPC_INT_CTL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2000) +#define LS7A_LPC_INT_ENA (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2004) +#define LS7A_LPC_INT_STS (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2008) +#define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c) +#define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010) + +#define LS7A_PMCON_SOC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000) +#define LS7A_PMCON_RESUME_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004) +#define LS7A_PMCON_RTC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008) +#define LS7A_PM1_EVT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c) +#define LS7A_PM1_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010) +#define LS7A_PM1_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014) +#define LS7A_PM1_TMR_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018) +#define LS7A_P_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c) +#define LS7A_GPE0_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028) +#define 
LS7A_GPE0_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c) +#define LS7A_RST_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030) +#define LS7A_WD_SET_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034) +#define LS7A_WD_TIMER_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038) +#define LS7A_THSENS_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c) +#define LS7A_GEN_RTC_1_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050) +#define LS7A_GEN_RTC_2_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054) +#define LS7A_DPM_CFG_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400) +#define LS7A_DPM_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404) +#define LS7A_DPM_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408) + +typedef enum { + ACPI_PCI_HOTPLUG_STATUS = 1 << 1, + ACPI_CPU_HOTPLUG_STATUS = 1 << 2, + ACPI_MEM_HOTPLUG_STATUS = 1 << 3, + ACPI_POWERBUTTON_STATUS = 1 << 8, + ACPI_RTC_WAKE_STATUS = 1 << 10, + ACPI_PCI_WAKE_STATUS = 1 << 14, + ACPI_ANY_WAKE_STATUS = 1 << 15, +} AcpiEventStatusBits; + +#define HT1LO_OFFSET 0xe0000000000UL + +/* PCI Configuration Space Base */ +#define MCFG_EXT_PCICFG_BASE 0xefe00000000UL + +/* REG ACCESS*/ +#define ls7a_readb(addr) (*(volatile unsigned char *)TO_UNCACHE(addr)) +#define ls7a_readw(addr) (*(volatile unsigned short *)TO_UNCACHE(addr)) +#define ls7a_readl(addr) (*(volatile unsigned int *)TO_UNCACHE(addr)) +#define ls7a_readq(addr) (*(volatile unsigned long *)TO_UNCACHE(addr)) +#define ls7a_writeb(val, addr) *(volatile unsigned char *)TO_UNCACHE(addr) = (val) +#define ls7a_writew(val, addr) *(volatile unsigned short *)TO_UNCACHE(addr) = (val) +#define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val) +#define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val) + +void enable_gpe_wakeup(void); +void enable_pci_wakeup(void); + +#endif /* __ASM_LOONGSON_H */ diff --git a/arch/loongarch/include/asm/mmu.h b/arch/loongarch/include/asm/mmu.h new file mode 100644 index 0000000000..0cc2d08035 --- /dev/null +++ b/arch/loongarch/include/asm/mmu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_MMU_H +#define __ASM_MMU_H + +#include <linux/atomic.h> +#include <linux/spinlock.h> + +typedef struct { + u64 asid[NR_CPUS]; + void *vdso; +} mm_context_t; + +#endif /* __ASM_MMU_H */ diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h new file mode 100644 index 0000000000..9f97c3453b --- /dev/null +++ b/arch/loongarch/include/asm/mmu_context.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Switch a MMU context. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MMU_CONTEXT_H +#define _ASM_MMU_CONTEXT_H + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/mm_types.h> +#include <linux/smp.h> +#include <linux/slab.h> + +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm-generic/mm_hooks.h> + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. 
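A minimal user-space sketch of that split; the 10-bit hardware ASID width is only an assumed example here, the real width comes from cpu_asid_mask():

	#include <stdio.h>
	#include <stdint.h>

	#define HW_ASID_BITS 10                              /* assumed width, for illustration */
	#define HW_ASID_MASK ((UINT64_C(1) << HW_ASID_BITS) - 1)

	int main(void)
	{
		uint64_t asid_cache = HW_ASID_MASK;          /* last value handed out on this CPU */
		uint64_t next = asid_cache + 1;              /* hardware bits wrap back to zero   */

		if ((next & HW_ASID_MASK) == 0)
			printf("hardware ASIDs exhausted: flush TLB, start a new generation\n");

		printf("hw asid = %llu, software version = %llu\n",
		       (unsigned long long)(next & HW_ASID_MASK),
		       (unsigned long long)(next & ~HW_ASID_MASK));
		return 0;
	}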
+ */ +static inline u64 asid_version_mask(unsigned int cpu) +{ + return ~(u64)(cpu_asid_mask(&cpu_data[cpu])); +} + +static inline u64 asid_first_version(unsigned int cpu) +{ + return cpu_asid_mask(&cpu_data[cpu]) + 1; +} + +#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) +#define asid_cache(cpu) (cpu_data[cpu].asid_cache) +#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu])) + +static inline int asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu)) + return 0; + + return 1; +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* Normal, classic get_new_mmu_context */ +static inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +{ + u64 asid = asid_cache(cpu); + + if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) + local_flush_tlb_user(); /* start new asid cycle */ + + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + cpu_context(i, mm) = 0; + + return 0; +} + +static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + + /* Check if our ASID is of an older version and thus invalid */ + if (!asid_valid(next, cpu)) + get_new_mmu_context(next, cpu); + + write_csr_asid(cpu_asid(cpu, next)); + + if (next != &init_mm) + csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); + else + csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + + /* + * Mark current->active_mm as not "active" anymore. + * We don't want to mislead possible IPI tlb flush routines. + */ + cpumask_set_cpu(cpu, mm_cpumask(next)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{ +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(task, mm) do { } while (0) + +/* + * If mm is currently active, we can't really drop it. + * Instead, we will get a new one for it. 
+ */ +static inline void +drop_mmu_context(struct mm_struct *mm, unsigned int cpu) +{ + int asid; + unsigned long flags; + + local_irq_save(flags); + + asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data); + + if (asid == cpu_asid(cpu, mm)) { + if (!current->mm || (current->mm == mm)) { + get_new_mmu_context(mm, cpu); + write_csr_asid(cpu_asid(cpu, mm)); + goto out; + } + } + + /* Will get a new context next time */ + cpu_context(cpu, mm) = 0; + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +out: + local_irq_restore(flags); +} + +#endif /* _ASM_MMU_CONTEXT_H */ diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h new file mode 100644 index 0000000000..2b9a90727e --- /dev/null +++ b/arch/loongarch/include/asm/mmzone.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen (chenhuacai@loongson.cn) + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MMZONE_H_ +#define _ASM_MMZONE_H_ + +#include <asm/page.h> +#include <asm/numa.h> + +extern struct pglist_data *node_data[]; + +#define NODE_DATA(nid) (node_data[(nid)]) + +#endif /* _ASM_MMZONE_H_ */ diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h new file mode 100644 index 0000000000..2ecd82bb64 --- /dev/null +++ b/arch/loongarch/include/asm/module.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_MODULE_H +#define _ASM_MODULE_H + +#include <asm/inst.h> +#include <asm-generic/module.h> + +#define RELA_STACK_DEPTH 16 + +struct mod_section { + int shndx; + int num_entries; + int max_entries; +}; + +struct mod_arch_specific { + struct mod_section got; + struct mod_section plt; + struct mod_section plt_idx; + + /* For CONFIG_DYNAMIC_FTRACE */ + struct plt_entry *ftrace_trampolines; +}; + +struct got_entry { + Elf_Addr symbol_addr; +}; + +struct plt_entry { + u32 inst_lu12iw; + u32 inst_lu32id; + u32 inst_lu52id; + u32 inst_jirl; +}; + +struct plt_idx_entry { + Elf_Addr symbol_addr; +}; + +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val); +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val); + +static inline struct got_entry emit_got_entry(Elf_Addr val) +{ + return (struct got_entry) { val }; +} + +static inline struct plt_entry emit_plt_entry(unsigned long val) +{ + u32 lu12iw, lu32id, lu52id, jirl; + + lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); + lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID)); + lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID)); + jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI)); + + return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl }; +} + +static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val) +{ + return (struct plt_idx_entry) { val }; +} + +static inline int get_plt_idx(unsigned long val, Elf_Shdr *sechdrs, const struct mod_section *sec) +{ + int i; + struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sechdrs[sec->shndx].sh_addr; + + for (i = 0; i < sec->num_entries; i++) { + if (plt_idx[i].symbol_addr == val) + return i; + } + + return -1; +} + +static inline struct plt_entry *get_plt_entry(unsigned long val, + Elf_Shdr *sechdrs, + const struct mod_section *sec_plt, + const struct mod_section *sec_plt_idx) +{ + int plt_idx = get_plt_idx(val, sechdrs, sec_plt_idx); + struct plt_entry *plt = 
(struct plt_entry *)sechdrs[sec_plt->shndx].sh_addr; + + if (plt_idx < 0) + return NULL; + + return plt + plt_idx; +} + +static inline struct got_entry *get_got_entry(Elf_Addr val, + Elf_Shdr *sechdrs, + const struct mod_section *sec) +{ + int i; + struct got_entry *got = (struct got_entry *)sechdrs[sec->shndx].sh_addr; + + for (i = 0; i < sec->num_entries; i++) + if (got[i].symbol_addr == val) + return &got[i]; + return NULL; +} + +#endif /* _ASM_MODULE_H */ diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h new file mode 100644 index 0000000000..88554f92e0 --- /dev/null +++ b/arch/loongarch/include/asm/module.lds.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +SECTIONS { + . = ALIGN(4); + .got 0 : { BYTE(0) } + .plt 0 : { BYTE(0) } + .plt.idx 0 : { BYTE(0) } + .ftrace_trampoline 0 : { BYTE(0) } +} diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h new file mode 100644 index 0000000000..27f319b498 --- /dev/null +++ b/arch/loongarch/include/asm/numa.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Jianmin Lv <lvjianmin@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_NUMA_H +#define _ASM_LOONGARCH_NUMA_H + +#include <linux/nodemask.h> + +#define NODE_ADDRSPACE_SHIFT 44 + +#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT) + +#ifdef CONFIG_NUMA + +extern int numa_off; +extern s16 __cpuid_to_node[CONFIG_NR_CPUS]; +extern nodemask_t numa_nodes_parsed __initdata; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); + +extern void __init early_numa_add_cpu(int cpuid, s16 node); +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); + +static inline void numa_clear_node(int cpu) +{ +} + +static inline void set_cpuid_to_node(int cpuid, s16 node) +{ + __cpuid_to_node[cpuid] = node; +} + +extern int early_cpu_to_node(int cpu); + +#else + +static inline void early_numa_add_cpu(int cpuid, s16 node) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } + +static inline int early_cpu_to_node(int cpu) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_LOONGARCH_NUMA_H */ diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h new file mode 100644 index 0000000000..63f137ce82 --- /dev/null +++ b/arch/loongarch/include/asm/page.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PAGE_H +#define _ASM_PAGE_H + +#include <linux/const.h> +#include <asm/addrspace.h> + +/* + * PAGE_SHIFT determines the page size + */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PAGE_SHIFT 12 +#endif +#ifdef CONFIG_PAGE_SIZE_16KB +#define PAGE_SHIFT 14 +#endif +#ifdef CONFIG_PAGE_SIZE_64KB +#define PAGE_SHIFT 16 +#endif +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define 
HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#ifndef __ASSEMBLY__ + +#include <linux/kernel.h> +#include <linux/pfn.h> + +/* + * It's normally defined only for FLATMEM config but it's + * used in our early mem init code for all memory models. + * So always define it. + */ +#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) + +extern void clear_page(void *page); +extern void copy_page(void *to, void *from); + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +extern unsigned long shm_align_mask; + +struct page; +struct vm_area_struct; +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE + +typedef struct { unsigned long pte; } pte_t; +#define pte_val(x) ((x).pte) +#define __pte(x) ((pte_t) { (x) }) +typedef struct page *pgtable_t; + +typedef struct { unsigned long pgd; } pgd_t; +#define pgd_val(x) ((x).pgd) +#define __pgd(x) ((pgd_t) { (x) }) + +/* + * Manipulate page protection bits + */ +typedef struct { unsigned long pgprot; } pgprot_t; +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) }) +#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK) + +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +/* + * __pa()/__va() should be used only during mem init. + */ +#define __pa(x) PHYSADDR(x) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) + +#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr)) + +#define virt_to_page(kaddr) \ +({ \ + (likely((unsigned long)kaddr < vm_map_base)) ? \ + dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\ +}) + +extern int __virt_addr_valid(volatile void *kaddr); +#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) + +#define VM_DATA_DEFAULT_FLAGS \ + (VM_READ | VM_WRITE | \ + ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PAGE_H */ diff --git a/arch/loongarch/include/asm/pci.h b/arch/loongarch/include/asm/pci.h new file mode 100644 index 0000000000..846909d7e8 --- /dev/null +++ b/arch/loongarch/include/asm/pci.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PCI_H +#define _ASM_PCI_H + +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/types.h> +#include <asm/io.h> + +#define PCIBIOS_MIN_IO 0x4000 +#define PCIBIOS_MIN_MEM 0x20000000 +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +#define HAVE_PCI_MMAP +#define pcibios_assign_all_busses() 0 + +extern phys_addr_t mcfg_addr_init(int node); + +/* generic pci stuff */ +#include <asm-generic/pci.h> + +#endif /* _ASM_PCI_H */ diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h new file mode 100644 index 0000000000..ed5da02b1c --- /dev/null +++ b/arch/loongarch/include/asm/percpu.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +#include <asm/cmpxchg.h> +#include <asm/loongarch.h> + +/* + * The "address" (in fact, offset from $r21) of a per-CPU variable is close to + * the loading address of main kernel image, but far from where the modules are + * loaded. Tell the compiler this fact when using explicit relocs. + */ +#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) +# if __has_attribute(model) +# define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) +# else +# error compiler support for the model attribute is necessary when a recent assembler is used +# endif +#endif + +/* Use r21 for fast access */ +register unsigned long __my_cpu_offset __asm__("$r21"); + +static inline void set_my_cpu_offset(unsigned long off) +{ + __my_cpu_offset = off; + csr_write64(off, PERCPU_BASE_KS); +} +#define __my_cpu_offset __my_cpu_offset + +#define PERCPU_OP(op, asm_op, c_op) \ +static __always_inline unsigned long __percpu_##op(void *ptr, \ + unsigned long val, int size) \ +{ \ + unsigned long ret; \ + \ + switch (size) { \ + case 4: \ + __asm__ __volatile__( \ + "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \ + : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \ + : [val] "r" (val)); \ + break; \ + case 8: \ + __asm__ __volatile__( \ + "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \ + : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \ + : [val] "r" (val)); \ + break; \ + default: \ + ret = 0; \ + BUILD_BUG(); \ + } \ + \ + return ret c_op val; \ +} + +PERCPU_OP(add, add, +) +PERCPU_OP(and, and, &) +PERCPU_OP(or, or, |) +#undef PERCPU_OP + +static __always_inline unsigned long __percpu_read(void *ptr, int size) +{ + unsigned long ret; + + switch (size) { + case 1: + __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 2: + __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 4: + __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + case 8: + __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n" + : [ret] "=&r"(ret) + : [ptr] "r"(ptr) + : "memory"); + break; + default: + ret = 0; + BUILD_BUG(); + } + + 
return ret; +} + +static __always_inline void __percpu_write(void *ptr, unsigned long val, int size) +{ + switch (size) { + case 1: + __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 2: + __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 4: + __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + case 8: + __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n" + : + : [val] "r" (val), [ptr] "r" (ptr) + : "memory"); + break; + default: + BUILD_BUG(); + } +} + +static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, + int size) +{ + switch (size) { + case 1: + case 2: + return __xchg_small((volatile void *)ptr, val, size); + + case 4: + return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val); + + case 8: + return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val); + + default: + BUILD_BUG(); + } + + return 0; +} + +/* this_cpu_cmpxchg */ +#define _protect_cmpxchg_local(pcp, o, n) \ +({ \ + typeof(*raw_cpu_ptr(&(pcp))) __ret; \ + preempt_disable_notrace(); \ + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ + preempt_enable_notrace(); \ + __ret; \ +}) + +#define _percpu_read(pcp) \ +({ \ + typeof(pcp) __retval; \ + __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \ + __retval; \ +}) + +#define _percpu_write(pcp, val) \ +do { \ + __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \ +} while (0) \ + +#define _pcp_protect(operation, pcp, val) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable_notrace(); \ + __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ + (val), sizeof(pcp)); \ + preempt_enable_notrace(); \ + __retval; \ +}) + +#define _percpu_add(pcp, val) \ + _pcp_protect(__percpu_add, pcp, val) + +#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) + +#define _percpu_and(pcp, val) \ + _pcp_protect(__percpu_and, pcp, val) + +#define _percpu_or(pcp, val) \ + _pcp_protect(__percpu_or, pcp, val) + +#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) + +#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) +#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) + +#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) +#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) + +#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) +#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) + +#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) +#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) + +#define this_cpu_read_1(pcp) _percpu_read(pcp) +#define this_cpu_read_2(pcp) _percpu_read(pcp) +#define this_cpu_read_4(pcp) _percpu_read(pcp) +#define this_cpu_read_8(pcp) _percpu_read(pcp) + +#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) + +#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) + +#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_4(ptr, o, n) 
_protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) + +#include <asm-generic/percpu.h> + +#endif /* __ASM_PERCPU_H */ diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h new file mode 100644 index 0000000000..2a35a0bc2a --- /dev/null +++ b/arch/loongarch/include/asm/perf_event.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __LOONGARCH_PERF_EVENT_H__ +#define __LOONGARCH_PERF_EVENT_H__ + +#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs + +#endif /* __LOONGARCH_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h new file mode 100644 index 0000000000..79470f0b4f --- /dev/null +++ b/arch/loongarch/include/asm/pgalloc.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PGALLOC_H +#define _ASM_PGALLOC_H + +#include <linux/mm.h> +#include <linux/sched.h> + +#define __HAVE_ARCH_PMD_ALLOC_ONE +#define __HAVE_ARCH_PUD_ALLOC_ONE +#include <asm-generic/pgalloc.h> + +static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} + +#ifndef __PAGETABLE_PMD_FOLDED + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud((unsigned long)pmd)); +} +#endif + +#ifndef __PAGETABLE_PUD_FOLDED + +static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + set_p4d(p4d, __p4d((unsigned long)pud)); +} + +#endif /* __PAGETABLE_PUD_FOLDED */ + +extern void pagetable_init(void); + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define __pte_free_tlb(tlb, pte, address) \ +do { \ + pagetable_pte_dtor(page_ptdesc(pte)); \ + tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \ +} while (0) + +#ifndef __PAGETABLE_PMD_FOLDED + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pmd_t *pmd; + struct ptdesc *ptdesc; + + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, 0); + if (!ptdesc) + return NULL; + + if (!pagetable_pmd_ctor(ptdesc)) { + pagetable_free(ptdesc); + return NULL; + } + + pmd = ptdesc_address(ptdesc); + pmd_init(pmd); + return pmd; +} + +#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) + +#endif + +#ifndef __PAGETABLE_PUD_FOLDED + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pud_t *pud; + struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0); + + if (!ptdesc) + return NULL; + pud = ptdesc_address(ptdesc); + + pud_init(pud); + return pud; +} + +#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x) + +#endif /* __PAGETABLE_PUD_FOLDED */ + +extern pte_t * __init populate_kernel_pte(unsigned long addr); +#endif /* _ASM_PGALLOC_H */ diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h new file mode 100644 index 0000000000..21319c1e04 --- /dev/null +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PGTABLE_BITS_H +#define 
_ASM_PGTABLE_BITS_H + +/* Page table bits */ +#define _PAGE_VALID_SHIFT 0 +#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ +#define _PAGE_DIRTY_SHIFT 1 +#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */ +#define _CACHE_SHIFT 4 /* 4~5, two bits */ +#define _PAGE_GLOBAL_SHIFT 6 +#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */ +#define _PAGE_PRESENT_SHIFT 7 +#define _PAGE_WRITE_SHIFT 8 +#define _PAGE_MODIFIED_SHIFT 9 +#define _PAGE_PROTNONE_SHIFT 10 +#define _PAGE_SPECIAL_SHIFT 11 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ +#define _PAGE_PFN_SHIFT 12 +#define _PAGE_SWP_EXCLUSIVE_SHIFT 23 +#define _PAGE_PFN_END_SHIFT 48 +#define _PAGE_PRESENT_INVALID_SHIFT 60 +#define _PAGE_NO_READ_SHIFT 61 +#define _PAGE_NO_EXEC_SHIFT 62 +#define _PAGE_RPLV_SHIFT 63 + +/* Used by software */ +#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT) +#define _PAGE_PRESENT_INVALID (_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT) +#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) +#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) +#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) + +/* We borrow bit 23 to store the exclusive marker in swap PTEs. */ +#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) + +/* Used by TLB hardware (placed in EntryLo*) */ +#define _PAGE_VALID (_ULCAST_(1) << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY (_ULCAST_(1) << _PAGE_DIRTY_SHIFT) +#define _PAGE_PLV (_ULCAST_(3) << _PAGE_PLV_SHIFT) +#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT) +#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT) +#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT) +#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT) +#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT) +#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT) +#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT) +#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT) + +#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT) +#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT) + +#define _PFN_MASK (~((_ULCAST_(1) << (PFN_PTE_SHIFT)) - 1) & \ + ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1)) + +/* + * Cache attributes + */ +#ifndef _CACHE_SUC +#define _CACHE_SUC (0<<_CACHE_SHIFT) /* Strong-ordered UnCached */ +#endif +#ifndef _CACHE_CC +#define _CACHE_CC (1<<_CACHE_SHIFT) /* Coherent Cached */ +#endif +#ifndef _CACHE_WUC +#define _CACHE_WUC (2<<_CACHE_SHIFT) /* Weak-ordered UnCached */ +#endif + +#define __READABLE (_PAGE_VALID) +#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE) + +#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) +#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) + +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \ + _PAGE_USER | _CACHE_CC) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ + _PAGE_USER | _CACHE_CC) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC) + +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC) +#define PAGE_KERNEL_SUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC) +#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) + +#ifndef __ASSEMBLY__ + +#define _PAGE_IOREMAP 
pgprot_val(PAGE_KERNEL_SUC) + +#define pgprot_noncached pgprot_noncached + +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_SUC; + + return __pgprot(prot); +} + +extern bool wc_enabled; + +#define pgprot_writecombine pgprot_writecombine + +static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC); + + return __pgprot(prot); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h new file mode 100644 index 0000000000..29d9b12298 --- /dev/null +++ b/arch/loongarch/include/asm/pgtable.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. + */ +#ifndef _ASM_PGTABLE_H +#define _ASM_PGTABLE_H + +#include <linux/compiler.h> +#include <asm/addrspace.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> + +#if CONFIG_PGTABLE_LEVELS == 2 +#include <asm-generic/pgtable-nopmd.h> +#elif CONFIG_PGTABLE_LEVELS == 3 +#include <asm-generic/pgtable-nopud.h> +#else +#include <asm-generic/pgtable-nop4d.h> +#endif + +#if CONFIG_PGTABLE_LEVELS == 2 +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#elif CONFIG_PGTABLE_LEVELS == 3 +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#elif CONFIG_PGTABLE_LEVELS == 4 +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) +#endif + +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) + +#define PTRS_PER_PGD (PAGE_SIZE >> 3) +#if CONFIG_PGTABLE_LEVELS > 3 +#define PTRS_PER_PUD (PAGE_SIZE >> 3) +#endif +#if CONFIG_PGTABLE_LEVELS > 2 +#define PTRS_PER_PMD (PAGE_SIZE >> 3) +#endif +#define PTRS_PER_PTE (PAGE_SIZE >> 3) + +#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) + +#ifndef __ASSEMBLY__ + +#include <linux/mm_types.h> +#include <linux/mmzone.h> +#include <asm/fixmap.h> +#include <asm/sparsemem.h> + +struct mm_struct; +struct vm_area_struct; + +/* + * ZERO_PAGE is a global shared page that is always zero; used + * for zero-mapped memory areas etc.. + */ + +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) + +/* + * TLB refill handlers may also map the vmalloc area into xkvrange. + * Avoid the first couple of pages so NULL pointer dereferences will + * still reliably trap. 
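A rough layout sketch of the defines that follow; vm_map_base and PCI_IOSIZE are boot-time/Kconfig quantities, so the numbers below are assumptions for illustration only:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size   = 1UL << 14;                 /* 16KB pages assumed */
		unsigned long vm_map_base = 0xffff800000000000UL;      /* example value only */
		unsigned long pci_iosize  = 0x00010000UL;              /* example value only */

		unsigned long modules_vaddr = vm_map_base + pci_iosize + 2 * page_size;
		unsigned long modules_end   = modules_vaddr + (256UL << 20);   /* SZ_256M */

		printf("guard pages : %#lx - %#lx\n", vm_map_base + pci_iosize, modules_vaddr - 1);
		printf("modules     : %#lx - %#lx\n", modules_vaddr, modules_end - 1);
		printf("vmalloc from: %#lx\n", modules_end);            /* VMALLOC_START */
		return 0;
	}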
+ */ +#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) +#define MODULES_END (MODULES_VADDR + SZ_256M) + +#ifdef CONFIG_KFENCE +#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE) +#else +#define KFENCE_AREA_SIZE 0 +#endif + +#define VMALLOC_START MODULES_END + +#ifndef CONFIG_KASAN +#define VMALLOC_END \ + (vm_map_base + \ + min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE) +#else +#define VMALLOC_END \ + (vm_map_base + \ + min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE) +#endif + +#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK)) +#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1) + +#define KFENCE_AREA_START (VMEMMAP_END + 1) +#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#ifndef __PAGETABLE_PMD_FOLDED +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#endif +#ifndef __PAGETABLE_PUD_FOLDED +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#endif +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern pte_t invalid_pte_table[PTRS_PER_PTE]; + +#ifndef __PAGETABLE_PUD_FOLDED + +typedef struct { unsigned long pud; } pud_t; +#define pud_val(x) ((x).pud) +#define __pud(x) ((pud_t) { (x) }) + +extern pud_t invalid_pud_table[PTRS_PER_PUD]; + +/* + * Empty pgd/p4d entries point to the invalid_pud_table. + */ +static inline int p4d_none(p4d_t p4d) +{ + return p4d_val(p4d) == (unsigned long)invalid_pud_table; +} + +static inline int p4d_bad(p4d_t p4d) +{ + return p4d_val(p4d) & ~PAGE_MASK; +} + +static inline int p4d_present(p4d_t p4d) +{ + return p4d_val(p4d) != (unsigned long)invalid_pud_table; +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = (unsigned long)invalid_pud_table; +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)p4d_val(p4d); +} + +static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) +{ + *p4d = p4dval; +} + +#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) +#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) + +#endif + +#ifndef __PAGETABLE_PMD_FOLDED + +typedef struct { unsigned long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) }) + +extern pmd_t invalid_pmd_table[PTRS_PER_PMD]; + +/* + * Empty pud entries point to the invalid_pmd_table. + */ +static inline int pud_none(pud_t pud) +{ + return pud_val(pud) == (unsigned long)invalid_pmd_table; +} + +static inline int pud_bad(pud_t pud) +{ + return pud_val(pud) & ~PAGE_MASK; +} + +static inline int pud_present(pud_t pud) +{ + return pud_val(pud) != (unsigned long)invalid_pmd_table; +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = ((unsigned long)invalid_pmd_table); +} + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pud_val(pud); +} + +#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) + +#define pud_phys(pud) PHYSADDR(pud_val(pud)) +#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) + +#endif + +/* + * Empty pmd entries point to the invalid_pte_table. 
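A toy model of the sentinel idea used throughout this header: an "empty" directory entry still points at a shared all-invalid table, so a page-table walk never chases a NULL pointer. Names here are made up for the sketch:

	#include <stdio.h>

	static unsigned long toy_invalid_pte_table[8];       /* stand-in for invalid_pte_table */

	struct toy_pmd { unsigned long val; };

	static int toy_pmd_none(struct toy_pmd pmd)
	{
		return pmd.val == (unsigned long)toy_invalid_pte_table;
	}

	int main(void)
	{
		struct toy_pmd empty  = { (unsigned long)toy_invalid_pte_table };
		struct toy_pmd filled = { 0x900000009000UL };  /* pretend it points at a PTE page */

		printf("empty? %d, filled? %d\n", toy_pmd_none(empty), toy_pmd_none(filled));
		return 0;
	}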
+ */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long)invalid_pte_table; +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~PAGE_MASK); +} + +static inline int pmd_present(pmd_t pmd) +{ + if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) + return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID)); + + return pmd_val(pmd) != (unsigned long)invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); +} + +#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) + +#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +extern pmd_t mk_pmd(struct page *page, pgprot_t prot); +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT)) +#define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) + +/* + * Initialize a new pgd / pud / pmd table with invalid pointers. + */ +extern void pgd_init(void *addr); +extern void pud_init(void *addr); +extern void pmd_init(void *addr); + +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). + * + * Format of swap PTEs: + * + * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 + * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 + * <--------------------------- offset --------------------------- + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * --------------> E <--- type ---> <---------- zeroes ----------> + * + * E is the exclusive marker that is not stored in swap entries. + * The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE. 
+ */ +static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) +{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; } + +#define __swp_type(x) (((x).val >> 16) & 0x7f) +#define __swp_offset(x) ((x).val >> 24) +#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) +#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +extern void paging_init(void); + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) +#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + if (pte_val(pteval) & _PAGE_GLOBAL) { + pte_t *buddy = ptep_buddy(ptep); + /* + * Make sure the buddy is global too (if it's !none, + * it better already be global) + */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + "1:" __LL "%[tmp], %[buddy] \n" + " bnez %[tmp], 2f \n" + " or %[tmp], %[tmp], %[global] \n" + __SC "%[tmp], %[buddy] \n" + " beqz %[tmp], 1b \n" + " nop \n" + "2: \n" + __WEAK_LLSC_MB + : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ + if (pte_none(*buddy)) + pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ + } +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + /* Preserve global status for the pair */ + if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) + set_pte(ptep, __pte(_PAGE_GLOBAL)); + else + set_pte(ptep, __pte(0)); +} + +#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) +#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) +#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) + +extern pgd_t swapper_pg_dir[]; +extern pgd_t invalid_pg_dir[]; + +struct page *dmw_virt_to_page(unsigned long kaddr); +struct page *tlb_virt_to_page(unsigned long kaddr); + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); } + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_ACCESSED; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED); + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY); + return pte; +} + +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_HUGE; + return pte; +} + +#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL) +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } +static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ + +#define pte_accessible pte_accessible +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) +{ + if (pte_val(a) & _PAGE_PRESENT) + return true; + + if ((pte_val(a) & _PAGE_PROTNONE) && + atomic_read(&mm->tlb_flush_pending)) + return true; + + return false; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
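What mk_pte()/pfn_pte() below boil down to: the page frame number shifted into the PFN field, OR'ed with the protection bits. A sketch assuming 16KB pages, where PFN_PTE_SHIFT works out to PAGE_SHIFT (14); the pfn and prot values are examples:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pfn_pte_shift = 14;                    /* PAGE_SHIFT - 12 + 12 */
		unsigned long pfn  = 0x8badfUL;
		unsigned long prot = (1UL << 0) | (1UL << 7);        /* e.g. VALID | PRESENT */

		unsigned long pte  = (pfn << pfn_pte_shift) | prot;  /* pfn_pte(pfn, prot)   */

		printf("pte=%#lx -> pfn=%#lx\n", pte, pte >> pfn_pte_shift);
		return 0;
	}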
+ */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_PAGE_CHG_MASK)); +} + +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); + +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + __update_tlb(vma, address, ptep); + if (--nr == 0) + break; + address += PAGE_SIZE; + ptep++; + } +} +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) + +#define __HAVE_ARCH_UPDATE_MMU_TLB +#define update_mmu_tlb update_mmu_cache + +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + __update_tlb(vma, address, (pte_t *)pmdp); +} + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ +#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) | + ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)); + pmd_val(pmd) |= _PAGE_HUGE; + + return pmd; +} + +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_WRITE); +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_WRITE; + if (pmd_val(pmd) & _PAGE_MODIFIED) + pmd_val(pmd) |= _PAGE_DIRTY; + return pmd; +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY); + return pmd; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED)); +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED); + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_MODIFIED; + if (pmd_val(pmd) & _PAGE_WRITE) + pmd_val(pmd) |= _PAGE_DIRTY; + return pmd; +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_ACCESSED; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_ACCESSED; + return pmd; +} + +static inline struct page *pmd_page(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pfn_to_page(pmd_pfn(pmd)); + + return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_HPAGE_CHG_MASK); + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_PRESENT_INVALID; + pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE); + + return pmd; +} + +/* + * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a + * different prototype. 
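One non-obvious step above is pmd_mkhuge(): bit 6 is reused as _PAGE_HUGE in a huge PMD, so an existing _PAGE_GLOBAL (also bit 6) has to be relocated to _PAGE_HGLOBAL (bit 12). A standalone sketch of that bit shuffle:

	#include <stdio.h>

	#define P_GLOBAL  (1UL << 6)      /* _PAGE_GLOBAL_SHIFT          */
	#define P_HUGE    (1UL << 6)      /* _PAGE_HUGE_SHIFT (same bit) */
	#define P_HGLOBAL (1UL << 12)     /* _PAGE_HGLOBAL_SHIFT         */

	static unsigned long toy_pmd_mkhuge(unsigned long v)
	{
		v = (v & ~P_GLOBAL) | ((v & P_GLOBAL) << (12 - 6));  /* move GLOBAL up           */
		return v | P_HUGE;                                   /* then claim bit 6 as HUGE */
	}

	int main(void)
	{
		printf("%#lx\n", toy_pmd_mkhuge(P_GLOBAL));   /* -> 0x1040 = HGLOBAL|HUGE */
		printf("%#lx\n", toy_pmd_mkhuge(0));          /* -> 0x0040 = HUGE only    */
		return 0;
	}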
+ */ +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old = *pmdp; + + pmd_clear(pmdp); + + return old; +} + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifdef CONFIG_NUMA_BALANCING +static inline long pte_protnone(pte_t pte) +{ + return (pte_val(pte) & _PAGE_PROTNONE); +} + +static inline long pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_PROTNONE); +} +#endif /* CONFIG_NUMA_BALANCING */ + +#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0) +#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0) + +/* + * We provide our own get_unmapped area to cope with the virtual aliasing + * constraints placed on us by the cache architecture. + */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_H */ diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h new file mode 100644 index 0000000000..1672262a5e --- /dev/null +++ b/arch/loongarch/include/asm/prefetch.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_PREFETCH_H +#define __ASM_PREFETCH_H + +#define Pref_Load 0 +#define Pref_Store 8 + +#ifdef __ASSEMBLY__ + + .macro __pref hint addr +#ifdef CONFIG_CPU_HAS_PREFETCH + preld \hint, \addr, 0 +#endif + .endm + + .macro pref_load addr + __pref Pref_Load, \addr + .endm + + .macro pref_store addr + __pref Pref_Store, \addr + .endm + +#endif + +#endif /* __ASM_PREFETCH_H */ diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h new file mode 100644 index 0000000000..c3bc44b5f5 --- /dev/null +++ b/arch/loongarch/include/asm/processor.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PROCESSOR_H +#define _ASM_PROCESSOR_H + +#include <linux/atomic.h> +#include <linux/cpumask.h> +#include <linux/sizes.h> + +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <asm/hw_breakpoint.h> +#include <asm/loongarch.h> +#include <asm/vdso/processor.h> +#include <uapi/asm/ptrace.h> +#include <uapi/asm/sigcontext.h> + +#ifdef CONFIG_32BIT + +#define TASK_SIZE 0x80000000UL +#define TASK_SIZE_MIN TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE + +#define TASK_IS_32BIT_ADDR 1 + +#endif + +#ifdef CONFIG_64BIT + +#define TASK_SIZE32 0x100000000UL +#define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits)) + +#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) +#define TASK_SIZE_MIN TASK_SIZE32 +#define STACK_TOP_MAX TASK_SIZE64 + +#define TASK_SIZE_OF(tsk) \ + (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) + +#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR) + +#endif + +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M) + +unsigned long stack_top(void); +#define STACK_TOP stack_top() + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
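A worked example of the TASK_UNMAPPED_BASE define that follows, for a 32-bit address-space task (TASK_SIZE32 = 4GB) and 16KB pages; the 64-bit case just swaps in TASK_SIZE64:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 1UL << 14;                 /* 16KB pages assumed */
		unsigned long task_size = 0x100000000UL;             /* TASK_SIZE32        */

		/* PAGE_ALIGN(TASK_SIZE / 3) */
		unsigned long base = (task_size / 3 + page_size - 1) & ~(page_size - 1);

		printf("mmap search starts near %#lx\n", base);      /* 0x55558000 */
		return 0;
	}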
+ */ +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) + +#define FPU_REG_WIDTH 256 +#define FPU_ALIGN __attribute__((aligned(32))) + +union fpureg { + __u32 val32[FPU_REG_WIDTH / 32]; + __u64 val64[FPU_REG_WIDTH / 64]; +}; + +#define FPR_IDX(width, idx) (idx) + +#define BUILD_FPR_ACCESS(width) \ +static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \ +{ \ + return fpr->val##width[FPR_IDX(width, idx)]; \ +} \ + \ +static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \ + u##width val) \ +{ \ + fpr->val##width[FPR_IDX(width, idx)] = val; \ +} + +BUILD_FPR_ACCESS(32) +BUILD_FPR_ACCESS(64) + +struct loongarch_fpu { + uint64_t fcc; /* 8x8 */ + uint32_t fcsr; + uint32_t ftop; + union fpureg fpr[NUM_FPU_REGS]; +}; + +struct loongarch_lbt { + /* Scratch registers */ + unsigned long scr0; + unsigned long scr1; + unsigned long scr2; + unsigned long scr3; + /* Eflags register */ + unsigned long eflags; +}; + +#define INIT_CPUMASK { \ + {0,} \ +} + +#define ARCH_MIN_TASKALIGN 32 + +struct loongarch_vdso_info; + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + /* Main processor registers. */ + unsigned long reg01, reg03, reg22; /* ra sp fp */ + unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */ + unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */ + + /* __schedule() return address / call frame address */ + unsigned long sched_ra; + unsigned long sched_cfa; + + /* CSR registers */ + unsigned long csr_prmd; + unsigned long csr_crmd; + unsigned long csr_euen; + unsigned long csr_ecfg; + unsigned long csr_badvaddr; /* Last user fault */ + + /* Other stuff associated with the thread. */ + unsigned long trap_nr; + unsigned long error_code; + unsigned long single_step; /* Used by PTRACE_SINGLESTEP */ + struct loongarch_vdso_info *vdso; + + /* + * FPU & vector registers, must be at the last of inherited + * context because they are conditionally copied at fork(). + */ + struct loongarch_fpu fpu FPU_ALIGN; + struct loongarch_lbt lbt; /* Also conditionally copied */ + + /* Hardware breakpoints pinned to this task. */ + struct perf_event *hbp_break[LOONGARCH_MAX_BRP]; + struct perf_event *hbp_watch[LOONGARCH_MAX_WRP]; +}; + +#define thread_saved_ra(tsk) (tsk->thread.sched_ra) +#define thread_saved_fp(tsk) (tsk->thread.sched_cfa) + +#define INIT_THREAD { \ + /* \ + * Main processor registers \ + */ \ + .reg01 = 0, \ + .reg03 = 0, \ + .reg22 = 0, \ + .reg23 = 0, \ + .reg24 = 0, \ + .reg25 = 0, \ + .reg26 = 0, \ + .reg27 = 0, \ + .reg28 = 0, \ + .reg29 = 0, \ + .reg30 = 0, \ + .reg31 = 0, \ + .sched_ra = 0, \ + .sched_cfa = 0, \ + .csr_crmd = 0, \ + .csr_prmd = 0, \ + .csr_euen = 0, \ + .csr_ecfg = 0, \ + .csr_badvaddr = 0, \ + /* \ + * Other stuff associated with the process \ + */ \ + .trap_nr = 0, \ + .error_code = 0, \ + /* \ + * FPU & vector registers \ + */ \ + .fpu = { \ + .fcc = 0, \ + .fcsr = 0, \ + .ftop = 0, \ + .fpr = {{{0,},},}, \ + }, \ + .hbp_break = {0}, \ + .hbp_watch = {0}, \ +} + +struct task_struct; + +enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL}; + +extern unsigned long boot_option_idle_override; +/* + * Do necessary setup to start up a newly executed thread. 
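The fpureg union above exists so one 256-bit register can be viewed either as 32-bit or as 64-bit lanes, which is what BUILD_FPR_ACCESS generates accessors for. A little-endian user-space sketch of that aliasing:

	#include <stdio.h>
	#include <stdint.h>

	union toy_fpureg {
		uint32_t val32[256 / 32];
		uint64_t val64[256 / 64];
	};

	int main(void)
	{
		union toy_fpureg r = { { 0 } };

		r.val64[0] = 0x1122334455667788ULL;
		printf("val32[0]=%#x val32[1]=%#x\n", r.val32[0], r.val32[1]);
		/* prints 0x55667788 and 0x11223344 on a little-endian machine */
		return 0;
	}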
+ */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +unsigned long __get_wchan(struct task_struct *p); + +#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \ + THREAD_SIZE - sizeof(struct pt_regs)) +#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk)) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3]) +#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen) +#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg) + +#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);}) + +#ifdef CONFIG_CPU_HAS_PREFETCH + +#define ARCH_HAS_PREFETCH +#define prefetch(x) __builtin_prefetch((x), 0, 1) + +#define ARCH_HAS_PREFETCHW +#define prefetchw(x) __builtin_prefetch((x), 1, 1) + +#endif + +#endif /* _ASM_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h new file mode 100644 index 0000000000..f3ddaed9ef --- /dev/null +++ b/arch/loongarch/include/asm/ptrace.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_PTRACE_H +#define _ASM_PTRACE_H + +#include <asm/page.h> +#include <asm/irqflags.h> +#include <asm/thread_info.h> +#include <uapi/asm/ptrace.h> + +/* + * This struct defines the way the registers are stored on the stack during + * a system call/exception. If you add a register here, please also add it to + * regoffset_table[] in arch/loongarch/kernel/ptrace.c. + */ +struct pt_regs { + /* Main processor registers. */ + unsigned long regs[32]; + + /* Original syscall arg0. */ + unsigned long orig_a0; + + /* Special CSR registers. */ + unsigned long csr_era; + unsigned long csr_badvaddr; + unsigned long csr_crmd; + unsigned long csr_prmd; + unsigned long csr_euen; + unsigned long csr_ecfg; + unsigned long csr_estat; + unsigned long __last[]; +} __aligned(8); + +static inline int regs_irqs_disabled(struct pt_regs *regs) +{ + return arch_irqs_disabled_flags(regs->csr_prmd); +} + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[3]; +} + +/* + * Don't use asm-generic/ptrace.h it defines FP accessors that don't make + * sense on LoongArch. We rather want an error if they get invoked. + */ + +static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) +{ + regs->csr_era = val; +} + +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register. The @offset is the + * offset of the register in struct pt_regs address which specified by @regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. 
If not, returns false. + */ +static inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +struct task_struct; + +/** + * regs_get_kernel_argument() - get Nth function argument in kernel + * @regs: pt_regs of that context + * @n: function argument number (start from 0) + * + * regs_get_argument() returns @n th argument of the function call. + * Note that this chooses most probably assignment, in some case + * it can be incorrect. + * This is expected to be called from kprobes or ftrace with regs + * where the top of stack is the return address. + */ +static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, + unsigned int n) +{ +#define NR_REG_ARGUMENTS 8 + static const unsigned int args[] = { + offsetof(struct pt_regs, regs[4]), + offsetof(struct pt_regs, regs[5]), + offsetof(struct pt_regs, regs[6]), + offsetof(struct pt_regs, regs[7]), + offsetof(struct pt_regs, regs[8]), + offsetof(struct pt_regs, regs[9]), + offsetof(struct pt_regs, regs[10]), + offsetof(struct pt_regs, regs[11]), + }; + + if (n < NR_REG_ARGUMENTS) + return regs_get_register(regs, args[n]); + else { + n -= NR_REG_ARGUMENTS; + return regs_get_kernel_stack_nth(regs, n); + } +} + +/* + * Does the process account for user or for system time? 
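+ *
+ * user_mode() answers this by testing the privilege level that was saved
+ * into csr_prmd when the exception was taken: PLV_USER means the CPU was
+ * running in user space, so the time is charged as user time.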
+ */ +#define user_mode(regs) (((regs)->csr_prmd & PLV_MASK) == PLV_USER) + +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->regs[4]; +} + +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long val) +{ + regs->regs[4] = val; +} + +#define instruction_pointer(regs) ((regs)->csr_era) +#define profile_pc(regs) instruction_pointer(regs) + +extern void die(const char *str, struct pt_regs *regs); + +static inline void die_if_kernel(const char *str, struct pt_regs *regs) +{ + if (unlikely(!user_mode(regs))) + die(str, regs); +} + +#define current_pt_regs() \ +({ \ + unsigned long sp = (unsigned long)__builtin_frame_address(0); \ + (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1) - 1; \ +}) + +/* Helpers for working with the user stack pointer */ + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[3]; +} + +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->regs[3] = val; +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#define arch_has_single_step() (1) +#endif + +#endif /* _ASM_PTRACE_H */ diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h new file mode 100644 index 0000000000..34f43f8ad5 --- /dev/null +++ b/arch/loongarch/include/asm/qspinlock.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_QSPINLOCK_H +#define _ASM_QSPINLOCK_H + +#include <asm-generic/qspinlock_types.h> + +#define queued_spin_unlock queued_spin_unlock + +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + compiletime_assert_atomic_type(lock->locked); + c_sync(); + WRITE_ONCE(lock->locked, 0); +} + +#include <asm-generic/qspinlock.h> + +#endif /* _ASM_QSPINLOCK_H */ diff --git a/arch/loongarch/include/asm/regdef.h b/arch/loongarch/include/asm/regdef.h new file mode 100644 index 0000000000..49a374c261 --- /dev/null +++ b/arch/loongarch/include/asm/regdef.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_REGDEF_H +#define _ASM_REGDEF_H + +#define zero $r0 /* wired zero */ +#define ra $r1 /* return address */ +#define tp $r2 +#define sp $r3 /* stack pointer */ +#define a0 $r4 /* argument registers, a0/a1 reused as v0/v1 for return value */ +#define a1 $r5 +#define a2 $r6 +#define a3 $r7 +#define a4 $r8 +#define a5 $r9 +#define a6 $r10 +#define a7 $r11 +#define t0 $r12 /* caller saved */ +#define t1 $r13 +#define t2 $r14 +#define t3 $r15 +#define t4 $r16 +#define t5 $r17 +#define t6 $r18 +#define t7 $r19 +#define t8 $r20 +#define u0 $r21 +#define fp $r22 /* frame pointer */ +#define s0 $r23 /* callee saved */ +#define s1 $r24 +#define s2 $r25 +#define s3 $r26 +#define s4 $r27 +#define s5 $r28 +#define s6 $r29 +#define s7 $r30 +#define s8 $r31 + +#endif /* _ASM_REGDEF_H */ diff --git a/arch/loongarch/include/asm/seccomp.h b/arch/loongarch/include/asm/seccomp.h new file mode 100644 index 0000000000..31d6ab42e4 --- /dev/null +++ b/arch/loongarch/include/asm/seccomp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm/unistd.h> + +#include <asm-generic/seccomp.h> + +#ifdef CONFIG_32BIT +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH32 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "loongarch32" +#else +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH64 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define 
SECCOMP_ARCH_NATIVE_NAME "loongarch64" +#endif + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/loongarch/include/asm/serial.h b/arch/loongarch/include/asm/serial.h new file mode 100644 index 0000000000..3fb550eb91 --- /dev/null +++ b/arch/loongarch/include/asm/serial.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM__SERIAL_H +#define __ASM__SERIAL_H + +#define BASE_BAUD 0 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) + +#endif /* __ASM__SERIAL_H */ diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h new file mode 100644 index 0000000000..ee52fb1e99 --- /dev/null +++ b/arch/loongarch/include/asm/setup.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _LOONGARCH_SETUP_H +#define _LOONGARCH_SETUP_H + +#include <linux/types.h> +#include <asm/sections.h> +#include <uapi/asm/setup.h> + +#define VECSIZE 0x200 + +extern unsigned long eentry; +extern unsigned long tlbrentry; +extern char init_command_line[COMMAND_LINE_SIZE]; +extern void tlb_init(int cpu); +extern void cpu_cache_init(void); +extern void cache_error_setup(void); +extern void per_cpu_trap_init(int cpu); +extern void set_handler(unsigned long offset, void *addr, unsigned long len); +extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len); + +#ifdef CONFIG_RELOCATABLE + +struct rela_la_abs { + long pc; + long symvalue; +}; + +extern long __la_abs_begin; +extern long __la_abs_end; +extern long __rela_dyn_begin; +extern long __rela_dyn_end; + +extern unsigned long __init relocate_kernel(void); + +#endif + +static inline unsigned long kaslr_offset(void) +{ + return (unsigned long)&_text - VMLINUX_LOAD_ADDRESS; +} + +#endif /* __SETUP_H */ diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h new file mode 100644 index 0000000000..c9554f48d2 --- /dev/null +++ b/arch/loongarch/include/asm/shmparam.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SHMPARAM_H +#define _ASM_SHMPARAM_H + +#define __ARCH_FORCE_SHMLBA 1 + +#define SHMLBA SZ_64K /* attach addr a multiple of this */ + +#endif /* _ASM_SHMPARAM_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h new file mode 100644 index 0000000000..f81e5f01d6 --- /dev/null +++ b/arch/loongarch/include/asm/smp.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/linkage.h> +#include <linux/threads.h> +#include <linux/cpumask.h> + +extern int smp_num_siblings; +extern int num_processors; +extern int disabled_cpus; +extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_core_map[]; +extern cpumask_t cpu_foreign_map[]; + +void loongson_smp_setup(void); +void loongson_prepare_cpus(unsigned int max_cpus); +void loongson_boot_secondary(int cpu, struct task_struct *idle); +void loongson_init_secondary(void); +void loongson_smp_finish(void); +void loongson_send_ipi_single(int cpu, unsigned int action); +void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); +#ifdef CONFIG_HOTPLUG_CPU +int 
loongson_cpu_disable(void); +void loongson_cpu_die(unsigned int cpu); +#endif + +static inline void plat_smp_setup(void) +{ + loongson_smp_setup(); +} + +static inline int raw_smp_processor_id(void) +{ +#if defined(__VDSO__) + extern int vdso_smp_processor_id(void) + __compiletime_error("VDSO should not call smp_processor_id()"); + return vdso_smp_processor_id(); +#else + return current_thread_info()->cpu; +#endif +} +#define raw_smp_processor_id raw_smp_processor_id + +/* Map from cpu id to sequential logical cpu number. This will only + * not be idempotent when cpus failed to come on-line. */ +extern int __cpu_number_map[NR_CPUS]; +#define cpu_number_map(cpu) __cpu_number_map[cpu] + +/* The reverse map from sequential logical cpu number to cpu id. */ +extern int __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +#define cpu_physical_id(cpu) cpu_logical_map(cpu) + +#define SMP_BOOT_CPU 0x1 +#define SMP_RESCHEDULE 0x2 +#define SMP_CALL_FUNCTION 0x4 + +struct secondary_data { + unsigned long stack; + unsigned long thread_info; +}; +extern struct secondary_data cpuboot_data; + +extern asmlinkage void smpboot_entry(void); +extern asmlinkage void start_secondary(void); + +extern void calculate_cpu_foreign_map(void); + +/* + * Generate IPI list text + */ +extern void show_ipi_list(struct seq_file *p, int prec); + +static inline void arch_send_call_function_single_ipi(int cpu) +{ + loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION); +} + +static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION); +} + +#ifdef CONFIG_HOTPLUG_CPU +static inline int __cpu_disable(void) +{ + return loongson_cpu_disable(); +} + +static inline void __cpu_die(unsigned int cpu) +{ + loongson_cpu_die(cpu); +} +#endif + +#endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h new file mode 100644 index 0000000000..8d4af6aff8 --- /dev/null +++ b/arch/loongarch/include/asm/sparsemem.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LOONGARCH_SPARSEMEM_H +#define _LOONGARCH_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ +#define MAX_PHYSMEM_BITS 48 + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#define VMEMMAP_SIZE (sizeof(struct page) * (1UL << (cpu_pabits + 1 - PAGE_SHIFT))) +#endif + +#endif /* CONFIG_SPARSEMEM */ + +#ifndef VMEMMAP_SIZE +#define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. 
*/ +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG +int memory_add_physaddr_to_nid(u64 addr); +#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid +#endif + +#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS) + +#endif /* _LOONGARCH_SPARSEMEM_H */ diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h new file mode 100644 index 0000000000..7cb3476999 --- /dev/null +++ b/arch/loongarch/include/asm/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_H +#define _ASM_SPINLOCK_H + +#include <asm/processor.h> +#include <asm/qspinlock.h> +#include <asm/qrwlock.h> + +#endif /* _ASM_SPINLOCK_H */ diff --git a/arch/loongarch/include/asm/spinlock_types.h b/arch/loongarch/include/asm/spinlock_types.h new file mode 100644 index 0000000000..7458d036c1 --- /dev/null +++ b/arch/loongarch/include/asm/spinlock_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> + +#endif diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h new file mode 100644 index 0000000000..4fb1e6408b --- /dev/null +++ b/arch/loongarch/include/asm/stackframe.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STACKFRAME_H +#define _ASM_STACKFRAME_H + +#include <linux/threads.h> + +#include <asm/addrspace.h> +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/asm-offsets.h> +#include <asm/loongarch.h> +#include <asm/thread_info.h> + +/* Make the addition of cfi info a little easier. */ + .macro cfi_rel_offset reg offset=0 docfi=0 + .if \docfi + .cfi_rel_offset \reg, \offset + .endif + .endm + + .macro cfi_st reg offset=0 docfi=0 + cfi_rel_offset \reg, \offset, \docfi + LONG_S \reg, sp, \offset + .endm + + .macro cfi_restore reg offset=0 docfi=0 + .if \docfi + .cfi_restore \reg + .endif + .endm + + .macro cfi_ld reg offset=0 docfi=0 + LONG_L \reg, sp, \offset + cfi_restore \reg \offset \docfi + .endm + +/* Jump to the runtime virtual address. */ + .macro JUMP_VIRT_ADDR temp1 temp2 + li.d \temp1, CACHE_BASE + pcaddi \temp2, 0 + or \temp1, \temp1, \temp2 + jirl zero, \temp1, 0xc + .endm + + .macro BACKUP_T0T1 + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + .endm + + .macro RELOAD_T0T1 + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + .endm + + .macro SAVE_TEMP docfi=0 + RELOAD_T0T1 + cfi_st t0, PT_R12, \docfi + cfi_st t1, PT_R13, \docfi + cfi_st t2, PT_R14, \docfi + cfi_st t3, PT_R15, \docfi + cfi_st t4, PT_R16, \docfi + cfi_st t5, PT_R17, \docfi + cfi_st t6, PT_R18, \docfi + cfi_st t7, PT_R19, \docfi + cfi_st t8, PT_R20, \docfi + .endm + + .macro SAVE_STATIC docfi=0 + cfi_st s0, PT_R23, \docfi + cfi_st s1, PT_R24, \docfi + cfi_st s2, PT_R25, \docfi + cfi_st s3, PT_R26, \docfi + cfi_st s4, PT_R27, \docfi + cfi_st s5, PT_R28, \docfi + cfi_st s6, PT_R29, \docfi + cfi_st s7, PT_R30, \docfi + cfi_st s8, PT_R31, \docfi + .endm + +/* + * get_saved_sp returns the SP for the current CPU by looking in the + * kernelsp array for it. It stores the current sp in t0 and loads the + * new value in sp. 
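+ *
+ * On SMP the per-CPU base kept in PERCPU_BASE_KS is added to the address
+ * of kernelsp, so each CPU picks up its own saved stack pointer.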
+ */ + .macro get_saved_sp docfi=0 + la_abs t1, kernelsp +#ifdef CONFIG_SMP + csrrd t0, PERCPU_BASE_KS + LONG_ADD t1, t1, t0 +#endif + move t0, sp + .if \docfi + .cfi_register sp, t0 + .endif + LONG_L sp, t1, 0 + .endm + + .macro set_saved_sp stackp temp temp2 + la.pcrel \temp, kernelsp +#ifdef CONFIG_SMP + LONG_ADD \temp, \temp, u0 +#endif + LONG_S \stackp, \temp, 0 + .endm + + .macro SAVE_SOME docfi=0 + csrrd t1, LOONGARCH_CSR_PRMD + andi t1, t1, 0x3 /* extract pplv bit */ + move t0, sp + beqz t1, 8f + /* Called from user mode, new stack. */ + get_saved_sp docfi=\docfi +8: + PTR_ADDI sp, sp, -PT_SIZE + .if \docfi + .cfi_def_cfa sp, 0 + .endif + cfi_st t0, PT_R3, \docfi + cfi_rel_offset sp, PT_R3, \docfi + LONG_S zero, sp, PT_R0 + csrrd t0, LOONGARCH_CSR_PRMD + LONG_S t0, sp, PT_PRMD + csrrd t0, LOONGARCH_CSR_CRMD + LONG_S t0, sp, PT_CRMD + csrrd t0, LOONGARCH_CSR_EUEN + LONG_S t0, sp, PT_EUEN + csrrd t0, LOONGARCH_CSR_ECFG + LONG_S t0, sp, PT_ECFG + csrrd t0, LOONGARCH_CSR_ESTAT + PTR_S t0, sp, PT_ESTAT + cfi_st ra, PT_R1, \docfi + cfi_st a0, PT_R4, \docfi + cfi_st a1, PT_R5, \docfi + cfi_st a2, PT_R6, \docfi + cfi_st a3, PT_R7, \docfi + cfi_st a4, PT_R8, \docfi + cfi_st a5, PT_R9, \docfi + cfi_st a6, PT_R10, \docfi + cfi_st a7, PT_R11, \docfi + csrrd ra, LOONGARCH_CSR_ERA + LONG_S ra, sp, PT_ERA + .if \docfi + .cfi_rel_offset ra, PT_ERA + .endif + cfi_st tp, PT_R2, \docfi + cfi_st fp, PT_R22, \docfi + + /* Set thread_info if we're coming from user mode */ + csrrd t0, LOONGARCH_CSR_PRMD + andi t0, t0, 0x3 /* extract pplv bit */ + beqz t0, 9f + + li.d tp, ~_THREAD_MASK + and tp, tp, sp + cfi_st u0, PT_R21, \docfi + csrrd u0, PERCPU_BASE_KS +9: +#ifdef CONFIG_KGDB + li.w t0, CSR_CRMD_WE + csrxchg t0, t0, LOONGARCH_CSR_CRMD +#endif + .endm + + .macro SAVE_ALL docfi=0 + SAVE_SOME \docfi + SAVE_TEMP \docfi + SAVE_STATIC \docfi + .endm + + .macro RESTORE_TEMP docfi=0 + cfi_ld t0, PT_R12, \docfi + cfi_ld t1, PT_R13, \docfi + cfi_ld t2, PT_R14, \docfi + cfi_ld t3, PT_R15, \docfi + cfi_ld t4, PT_R16, \docfi + cfi_ld t5, PT_R17, \docfi + cfi_ld t6, PT_R18, \docfi + cfi_ld t7, PT_R19, \docfi + cfi_ld t8, PT_R20, \docfi + .endm + + .macro RESTORE_STATIC docfi=0 + cfi_ld s0, PT_R23, \docfi + cfi_ld s1, PT_R24, \docfi + cfi_ld s2, PT_R25, \docfi + cfi_ld s3, PT_R26, \docfi + cfi_ld s4, PT_R27, \docfi + cfi_ld s5, PT_R28, \docfi + cfi_ld s6, PT_R29, \docfi + cfi_ld s7, PT_R30, \docfi + cfi_ld s8, PT_R31, \docfi + .endm + + .macro RESTORE_SOME docfi=0 + LONG_L a0, sp, PT_PRMD + andi a0, a0, 0x3 /* extract pplv bit */ + beqz a0, 8f + cfi_ld u0, PT_R21, \docfi +8: + LONG_L a0, sp, PT_ERA + csrwr a0, LOONGARCH_CSR_ERA + LONG_L a0, sp, PT_PRMD + csrwr a0, LOONGARCH_CSR_PRMD + cfi_ld ra, PT_R1, \docfi + cfi_ld a0, PT_R4, \docfi + cfi_ld a1, PT_R5, \docfi + cfi_ld a2, PT_R6, \docfi + cfi_ld a3, PT_R7, \docfi + cfi_ld a4, PT_R8, \docfi + cfi_ld a5, PT_R9, \docfi + cfi_ld a6, PT_R10, \docfi + cfi_ld a7, PT_R11, \docfi + cfi_ld tp, PT_R2, \docfi + cfi_ld fp, PT_R22, \docfi + .endm + + .macro RESTORE_SP_AND_RET docfi=0 + cfi_ld sp, PT_R3, \docfi + ertn + .endm + + .macro RESTORE_ALL_AND_RET docfi=0 + RESTORE_STATIC \docfi + RESTORE_TEMP \docfi + RESTORE_SOME \docfi + RESTORE_SP_AND_RET \docfi + .endm + +#endif /* _ASM_STACKFRAME_H */ diff --git a/arch/loongarch/include/asm/stackprotector.h b/arch/loongarch/include/asm/stackprotector.h new file mode 100644 index 0000000000..a1a965751a --- /dev/null +++ b/arch/loongarch/include/asm/stackprotector.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * 
GCC stack protector support. + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary and + * on LoongArch gcc expects it to be defined by a global variable called + * "__stack_chk_guard". + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. */ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h new file mode 100644 index 0000000000..f23adb15f4 --- /dev/null +++ b/arch/loongarch/include/asm/stacktrace.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STACKTRACE_H +#define _ASM_STACKTRACE_H + +#include <asm/asm.h> +#include <asm/ptrace.h> +#include <asm/loongarch.h> +#include <linux/stringify.h> + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_IRQ, + STACK_TYPE_TASK, +}; + +struct stack_info { + enum stack_type type; + unsigned long begin, end, next_sp; +}; + +struct stack_frame { + unsigned long fp; + unsigned long ra; +}; + +bool in_irq_stack(unsigned long stack, struct stack_info *info); +bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info); +int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info); + +#define STR_LONG_L __stringify(LONG_L) +#define STR_LONG_S __stringify(LONG_S) +#define STR_LONGSIZE __stringify(LONGSIZE) + +#define STORE_ONE_REG(r) \ + STR_LONG_S " $r" __stringify(r)", %1, "STR_LONGSIZE"*"__stringify(r)"\n\t" + +#define CSRRD_ONE_REG(reg) \ + __stringify(csrrd) " %0, "__stringify(reg)"\n\t" + +static __always_inline void prepare_frametrace(struct pt_regs *regs) +{ + __asm__ __volatile__( + /* Save $ra */ + STORE_ONE_REG(1) + /* Use $ra to save PC */ + "pcaddi $ra, 0\n\t" + STR_LONG_S " $ra, %0\n\t" + /* Restore $ra */ + STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t" + STORE_ONE_REG(2) + STORE_ONE_REG(3) + STORE_ONE_REG(4) + STORE_ONE_REG(5) + STORE_ONE_REG(6) + STORE_ONE_REG(7) + STORE_ONE_REG(8) + STORE_ONE_REG(9) + STORE_ONE_REG(10) + STORE_ONE_REG(11) + STORE_ONE_REG(12) + STORE_ONE_REG(13) + STORE_ONE_REG(14) + STORE_ONE_REG(15) + STORE_ONE_REG(16) + STORE_ONE_REG(17) + STORE_ONE_REG(18) + STORE_ONE_REG(19) + STORE_ONE_REG(20) + STORE_ONE_REG(21) + STORE_ONE_REG(22) + STORE_ONE_REG(23) + STORE_ONE_REG(24) + STORE_ONE_REG(25) + STORE_ONE_REG(26) + STORE_ONE_REG(27) + STORE_ONE_REG(28) + STORE_ONE_REG(29) + STORE_ONE_REG(30) + STORE_ONE_REG(31) + : "=m" (regs->csr_era) + : "r" (regs->regs) + : "memory"); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_BADV) : "=r" (regs->csr_badvaddr)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_CRMD) : "=r" (regs->csr_crmd)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_PRMD) : "=r" (regs->csr_prmd)); + __asm__ 
__volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_EUEN) : "=r" (regs->csr_euen)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ECFG) : "=r" (regs->csr_ecfg)); + __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ESTAT) : "=r" (regs->csr_estat)); +} + +#endif /* _ASM_STACKTRACE_H */ diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h new file mode 100644 index 0000000000..5bb5a90d26 --- /dev/null +++ b/arch/loongarch/include/asm/string.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_STRING_H +#define _ASM_STRING_H + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); +extern void *__memset(void *__s, int __c, size_t __count); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); +extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); + +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ + +#define memset(s, c, n) __memset(s, c, n) +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + +#endif + +#endif /* _ASM_STRING_H */ diff --git a/arch/loongarch/include/asm/suspend.h b/arch/loongarch/include/asm/suspend.h new file mode 100644 index 0000000000..4025c9d5d7 --- /dev/null +++ b/arch/loongarch/include/asm/suspend.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +void loongarch_common_suspend(void); +void loongarch_common_resume(void); +void loongarch_suspend_enter(void); +void loongarch_wakeup_start(void); + +#endif diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h new file mode 100644 index 0000000000..5b225aff3b --- /dev/null +++ b/arch/loongarch/include/asm/switch_to.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +#include <asm/cpu-features.h> +#include <asm/fpu.h> +#include <asm/lbt.h> + +struct task_struct; + +/** + * __switch_to - switch execution of a task + * @prev: The task previously executed. + * @next: The task to begin executing. + * @next_ti: task_thread_info(next). + * @sched_ra: __schedule return address. + * @sched_cfa: __schedule call frame address. + * + * This function is used whilst scheduling to save the context of prev & load + * the context of next. Returns prev. + */ +extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next, struct thread_info *next_ti, + void *sched_ra, void *sched_cfa); + +/* + * For newly created kernel threads switch_to() will return to + * ret_from_kernel_thread, newly created user threads to ret_from_fork. + * That is, everything following __switch_to() will be skipped for new threads. + * So everything that matters to new threads should be placed before __switch_to(). 
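+ *
+ * Note that the switch_to() macro below gives up the FPU and LBT state of
+ * @prev (lose_fpu_inatomic()/lose_lbt_inatomic()) and installs the
+ * hardware breakpoints of @next before invoking __switch_to(), i.e. while
+ * @prev is still the running context.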
+ */ +#define switch_to(prev, next, last) \ +do { \ + lose_fpu_inatomic(1, prev); \ + lose_lbt_inatomic(1, prev); \ + hw_breakpoint_thread_switch(next); \ + (last) = __switch_to(prev, next, task_thread_info(next), \ + __builtin_return_address(0), __builtin_frame_address(0)); \ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h new file mode 100644 index 0000000000..e286dc5847 --- /dev/null +++ b/arch/loongarch/include/asm/syscall.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_SYSCALL_H +#define __ASM_LOONGARCH_SYSCALL_H + +#include <linux/compiler.h> +#include <uapi/linux/audit.h> +#include <linux/elf-em.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/unistd.h> + +extern void *sys_call_table[]; + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[11]; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[4] = regs->orig_a0; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->regs[4]; + + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[4]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->regs[4] = (long) error ? error : val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_a0; + memcpy(&args[1], ®s->regs[5], 5 * sizeof(long)); +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_LOONGARCH64; +} + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + +#endif /* __ASM_LOONGARCH_SYSCALL_H */ diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h new file mode 100644 index 0000000000..8cb653d49a --- /dev/null +++ b/arch/loongarch/include/asm/thread_info.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * thread_info.h: LoongArch low-level thread information + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include <asm/processor.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + struct pt_regs *regs; + unsigned long syscall; /* syscall number */ + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define 
INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = _TIF_FIXADE, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + +/* How to get the thread information struct from C. */ +register struct thread_info *__current_thread_info __asm__("$tp"); + +static inline struct thread_info *current_thread_info(void) +{ + return __current_thread_info; +} + +register unsigned long current_stack_pointer __asm__("$sp"); + +#endif /* !__ASSEMBLY__ */ + +/* thread information allocation */ +#define THREAD_SIZE SZ_16K +#define THREAD_MASK (THREAD_SIZE - 1UL) +#define THREAD_SIZE_ORDER ilog2(THREAD_SIZE / PAGE_SIZE) +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */ +#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ +#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ +#define TIF_NOHZ 6 /* in adaptive nohz mode */ +#define TIF_UPROBE 7 /* breakpointed or singlestepping */ +#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */ +#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */ +#define TIF_MEMDIE 10 /* is terminating due to OOM killer */ +#define TIF_FIXADE 11 /* Fix address errors in software */ +#define TIF_LOGADE 12 /* Log address errors to syslog */ +#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */ +#define TIF_32BIT_ADDR 14 /* 32-bit address space */ +#define TIF_LOAD_WATCH 15 /* If set, load watch registers */ +#define TIF_SINGLESTEP 16 /* Single Step */ +#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */ +#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */ +#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */ +#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */ + +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) +#define _TIF_NOHZ (1<<TIF_NOHZ) +#define _TIF_UPROBE (1<<TIF_UPROBE) +#define _TIF_USEDFPU (1<<TIF_USEDFPU) +#define _TIF_USEDSIMD (1<<TIF_USEDSIMD) +#define _TIF_FIXADE (1<<TIF_FIXADE) +#define _TIF_LOGADE (1<<TIF_LOGADE) +#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) +#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) +#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) +#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE) +#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE) +#define _TIF_USEDLBT (1<<TIF_USEDLBT) +#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE) + +#endif /* __KERNEL__ */ +#endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h new file mode 100644 index 0000000000..037a2d1b8f --- /dev/null +++ b/arch/loongarch/include/asm/time.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TIME_H +#define _ASM_TIME_H + +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <asm/loongarch.h> + +extern u64 cpu_clock_freq; +extern u64 const_clock_freq; + +extern void save_counter(void); +extern void sync_counter(void); + +static inline unsigned int calc_const_freq(void) +{ + unsigned int res; + 
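+	/*
+	 * The constant timer frequency is read from CPUCFG: CPUCFG2 reports
+	 * whether the constant-frequency timer (LLFTP) exists, CPUCFG4 gives
+	 * the base clock frequency and CPUCFG5 a multiplier (cfm) and divisor
+	 * (cfd), so the result is base_freq * cfm / cfd.
+	 */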
unsigned int base_freq; + unsigned int cfm, cfd; + + res = read_cpucfg(LOONGARCH_CPUCFG2); + if (!(res & CPUCFG2_LLFTP)) + return 0; + + base_freq = read_cpucfg(LOONGARCH_CPUCFG4); + res = read_cpucfg(LOONGARCH_CPUCFG5); + cfm = res & 0xffff; + cfd = (res >> 16) & 0xffff; + + if (!base_freq || !cfm || !cfd) + return 0; + + return (base_freq * cfm / cfd); +} + +/* + * Initialize the calling CPU's timer interrupt as clockevent device + */ +extern int constant_clockevent_init(void); +extern int constant_clocksource_init(void); + +static inline void clockevent_set_clock(struct clock_event_device *cd, + unsigned int clock) +{ + clockevents_calc_mult_shift(cd, clock, 4); +} + +#endif /* _ASM_TIME_H */ diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h new file mode 100644 index 0000000000..fb41e9e7a2 --- /dev/null +++ b/arch/loongarch/include/asm/timex.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TIMEX_H +#define _ASM_TIMEX_H + +#ifdef __KERNEL__ + +#include <linux/compiler.h> + +#include <asm/cpu.h> +#include <asm/cpu-features.h> + +typedef unsigned long cycles_t; + +#define get_cycles get_cycles + +static inline cycles_t get_cycles(void) +{ + return drdtime(); +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_TIMEX_H */ diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h new file mode 100644 index 0000000000..da7a3b5b93 --- /dev/null +++ b/arch/loongarch/include/asm/tlb.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TLB_H +#define __ASM_TLB_H + +#include <linux/mm_types.h> +#include <asm/cpu-features.h> +#include <asm/loongarch.h> + +/* + * TLB Invalidate Flush + */ +static inline void tlbclr(void) +{ + __asm__ __volatile__("tlbclr"); +} + +static inline void tlbflush(void) +{ + __asm__ __volatile__("tlbflush"); +} + +/* + * TLB R/W operations. 
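+ * (thin wrappers around the tlbsrch, tlbrd, tlbwr and tlbfill instructions)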
+ */ +static inline void tlb_probe(void) +{ + __asm__ __volatile__("tlbsrch"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__("tlbrd"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__("tlbwr"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__("tlbfill"); +} + +enum invtlb_ops { + /* Invalid all tlb */ + INVTLB_ALL = 0x0, + /* Invalid current tlb */ + INVTLB_CURRENT_ALL = 0x1, + /* Invalid all global=1 lines in current tlb */ + INVTLB_CURRENT_GTRUE = 0x2, + /* Invalid all global=0 lines in current tlb */ + INVTLB_CURRENT_GFALSE = 0x3, + /* Invalid global=0 and matched asid lines in current tlb */ + INVTLB_GFALSE_AND_ASID = 0x4, + /* Invalid addr with global=0 and matched asid in current tlb */ + INVTLB_ADDR_GFALSE_AND_ASID = 0x5, + /* Invalid addr with global=1 or matched asid in current tlb */ + INVTLB_ADDR_GTRUE_OR_ASID = 0x6, + /* Invalid matched gid in guest tlb */ + INVGTLB_GID = 0x9, + /* Invalid global=1, matched gid in guest tlb */ + INVGTLB_GID_GTRUE = 0xa, + /* Invalid global=0, matched gid in guest tlb */ + INVGTLB_GID_GFALSE = 0xb, + /* Invalid global=0, matched gid and asid in guest tlb */ + INVGTLB_GID_GFALSE_ASID = 0xc, + /* Invalid global=0 , matched gid, asid and addr in guest tlb */ + INVGTLB_GID_GFALSE_ASID_ADDR = 0xd, + /* Invalid global=1 , matched gid, asid and addr in guest tlb */ + INVGTLB_GID_GTRUE_ASID_ADDR = 0xe, + /* Invalid all gid gva-->gpa guest tlb */ + INVGTLB_ALLGID_GVA_TO_GPA = 0x10, + /* Invalid all gid gpa-->hpa tlb */ + INVTLB_ALLGID_GPA_TO_HPA = 0x11, + /* Invalid all gid tlb, including gva-->gpa and gpa-->hpa */ + INVTLB_ALLGID = 0x12, + /* Invalid matched gid gva-->gpa guest tlb */ + INVGTLB_GID_GVA_TO_GPA = 0x13, + /* Invalid matched gid gpa-->hpa tlb */ + INVTLB_GID_GPA_TO_HPA = 0x14, + /* Invalid matched gid tlb,including gva-->gpa and gpa-->hpa */ + INVTLB_GID_ALL = 0x15, + /* Invalid matched gid and addr gpa-->hpa tlb */ + INVTLB_GID_ADDR = 0x16, +}; + +static __always_inline void invtlb(u32 op, u32 info, u64 addr) +{ + __asm__ __volatile__( + "invtlb %0, %1, %2\n\t" + : + : "i"(op), "r"(info), "r"(addr) + : "memory" + ); +} + +static __always_inline void invtlb_addr(u32 op, u32 info, u64 addr) +{ + BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0); + __asm__ __volatile__( + "invtlb %0, $zero, %1\n\t" + : + : "i"(op), "r"(addr) + : "memory" + ); +} + +static __always_inline void invtlb_info(u32 op, u32 info, u64 addr) +{ + BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0); + __asm__ __volatile__( + "invtlb %0, %1, $zero\n\t" + : + : "i"(op), "r"(info) + : "memory" + ); +} + +static __always_inline void invtlb_all(u32 op, u32 info, u64 addr) +{ + BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0); + BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0); + __asm__ __volatile__( + "invtlb %0, $zero, $zero\n\t" + : + : "i"(op) + : "memory" + ); +} + +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) + +static void tlb_flush(struct mmu_gather *tlb); + +#define tlb_flush tlb_flush +#include <asm-generic/tlb.h> + +static inline void tlb_flush(struct mmu_gather *tlb) +{ + struct vm_area_struct vma; + + vma.vm_mm = tlb->mm; + vm_flags_init(&vma, 0); + if (tlb->fullmm) { + flush_tlb_mm(tlb->mm); + return; + } + + flush_tlb_range(&vma, tlb->start, tlb->end); +} + +extern void handle_tlb_load(void); +extern void handle_tlb_store(void); +extern void handle_tlb_modify(void); +extern void handle_tlb_refill(void); +extern void handle_tlb_protect(void); 
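+
+/*
+ * Illustrative use of the invtlb helpers above (a sketch, not lifted from
+ * the actual flush implementations): dropping a single global kernel
+ * mapping at vaddr and then clearing the whole current TLB would look
+ * roughly like
+ *
+ *	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, vaddr);
+ *	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
+ */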
+extern void handle_tlb_load_ptw(void); +extern void handle_tlb_store_ptw(void); +extern void handle_tlb_modify_ptw(void); + +extern void dump_tlb_all(void); +extern void dump_tlb_regs(void); + +#endif /* __ASM_TLB_H */ diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h new file mode 100644 index 0000000000..a0785e5906 --- /dev/null +++ b/arch/loongarch/include/asm/tlbflush.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_user(void); +extern void local_flush_tlb_kernel(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#ifdef CONFIG_SMP + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long); +extern void flush_tlb_kernel_range(unsigned long, unsigned long); +extern void flush_tlb_page(struct vm_area_struct *, unsigned long); +extern void flush_tlb_one(unsigned long vaddr); + +#else /* CONFIG_SMP */ + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) +#define flush_tlb_kernel_range(vmaddr, end) local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_TLBFLUSH_H */ diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h new file mode 100644 index 0000000000..66128dec0b --- /dev/null +++ b/arch/loongarch/include/asm/topology.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + +#include <linux/smp.h> + +#ifdef CONFIG_NUMA + +extern cpumask_t cpus_on_node[]; + +#define cpumask_of_node(node) (&cpus_on_node[node]) + +struct pci_bus; +extern int pcibus_to_node(struct pci_bus *); + +#define cpumask_of_pcibus(bus) (cpu_online_mask) + +extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; + +void numa_set_distance(int from, int to, int distance); + +#define node_distance(from, to) (node_distances[(from)][(to)]) + +#else +#define pcibus_to_node(bus) 0 +#endif + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data[cpu].package) +#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu]) +#endif + +#include <asm-generic/topology.h> + +static inline void 
arch_fix_phys_package_id(int num, u32 slot) { } +#endif /* __ASM_TOPOLOGY_H */ diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h new file mode 100644 index 0000000000..baf15a0dcf --- /dev/null +++ b/arch/loongarch/include/asm/types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H + +#include <asm-generic/int-ll64.h> +#include <uapi/asm/types.h> + +#ifdef __ASSEMBLY__ +#define _ULCAST_ +#define _U64CAST_ +#else +#define _ULCAST_ (unsigned long) +#define _U64CAST_ (u64) +#endif + +#endif /* _ASM_TYPES_H */ diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h new file mode 100644 index 0000000000..0d22991ae4 --- /dev/null +++ b/arch/loongarch/include/asm/uaccess.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2014, Imagination Technologies Ltd. + */ +#ifndef _ASM_UACCESS_H +#define _ASM_UACCESS_H + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/extable.h> +#include <asm/pgtable.h> +#include <asm/extable.h> +#include <asm/asm-extable.h> +#include <asm-generic/access_ok.h> + +extern u64 __ua_limit; + +#define __UA_ADDR ".dword" +#define __UA_LIMIT __ua_limit + +/* + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \ + ((x) = 0, -EFAULT); \ +}) + +/* + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \ +}) + +/* + * __get_user: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. 
It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + \ + __chk_user_ptr(ptr); \ + __get_user_common((x), sizeof(*(ptr)), ptr); \ + __gu_err; \ +}) + +/* + * __put_user: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_user(x, ptr) \ +({ \ + int __pu_err = 0; \ + __typeof__(*(ptr)) __pu_val; \ + \ + __pu_val = (x); \ + __chk_user_ptr(ptr); \ + __put_user_common(ptr, sizeof(*(ptr))); \ + __pu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_common(val, size, ptr) \ +do { \ + switch (size) { \ + case 1: __get_data_asm(val, "ld.b", ptr); break; \ + case 2: __get_data_asm(val, "ld.h", ptr); break; \ + case 4: __get_data_asm(val, "ld.w", ptr); break; \ + case 8: __get_data_asm(val, "ld.d", ptr); break; \ + default: BUILD_BUG(); break; \ + } \ +} while (0) + +#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) + +#define __get_data_asm(val, insn, ptr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: " insn " %1, %2 \n" \ + "2: \n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \ + : "+r" (__gu_err), "=r" (__gu_tmp) \ + : "m" (__m(ptr))); \ + \ + (val) = (__typeof__(*(ptr))) __gu_tmp; \ +} + +#define __put_user_common(ptr, size) \ +do { \ + switch (size) { \ + case 1: __put_data_asm("st.b", ptr); break; \ + case 2: __put_data_asm("st.h", ptr); break; \ + case 4: __put_data_asm("st.w", ptr); break; \ + case 8: __put_data_asm("st.d", ptr); break; \ + default: BUILD_BUG(); break; \ + } \ +} while (0) + +#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) + +#define __put_data_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + "1: " insn " %z2, %1 # __put_user_asm\n" \ + "2: \n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \ + : "+r" (__pu_err), "=m" (__m(ptr)) \ + : "Jr" (__pu_val)); \ +} + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __gu_err = 0; \ + \ + __get_kernel_common(*((type *)(dst)), sizeof(type), \ + (__force type *)(src)); \ + if (unlikely(__gu_err)) \ + goto err_label; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + type __pu_val; \ + int __pu_err = 0; \ + \ + __pu_val = *(__force type *)(src); \ + __put_kernel_common(((type *)(dst)), sizeof(type)); \ + if (unlikely(__pu_err)) \ + goto err_label; \ +} while (0) + +extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n); + +static 
inline unsigned long __must_check +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __copy_user(to, (__force const void *)from, n); +} + +static inline unsigned long __must_check +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __copy_user((__force void *)to, from, n); +} + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @addr: Destination address, in user space. + * @size: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. + */ +extern unsigned long __clear_user(void __user *addr, __kernel_size_t size); + +#define clear_user(addr, n) \ +({ \ + void __user *__cl_addr = (addr); \ + unsigned long __cl_size = (n); \ + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ + __cl_size = __clear_user(__cl_addr, __cl_size); \ + __cl_size; \ +}) + +extern long strncpy_from_user(char *to, const char __user *from, long n); +extern long strnlen_user(const char __user *str, long n); + +#endif /* _ASM_UACCESS_H */ diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h new file mode 100644 index 0000000000..cfddb0116a --- /dev/null +++ b/arch/loongarch/include/asm/unistd.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h new file mode 100644 index 0000000000..b9dce87afd --- /dev/null +++ b/arch/loongarch/include/asm/unwind.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Most of this ideas comes from x86. 
+ * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_UNWIND_H +#define _ASM_UNWIND_H + +#include <linux/sched.h> +#include <linux/ftrace.h> + +#include <asm/ptrace.h> +#include <asm/stacktrace.h> + +enum unwinder_type { + UNWINDER_GUESS, + UNWINDER_PROLOGUE, +}; + +struct unwind_state { + char type; /* UNWINDER_XXX */ + struct stack_info stack_info; + struct task_struct *task; + bool first, error, reset; + int graph_idx; + unsigned long sp, pc, ra; +}; + +bool default_next_frame(struct unwind_state *state); + +void unwind_start(struct unwind_state *state, + struct task_struct *task, struct pt_regs *regs); +bool unwind_next_frame(struct unwind_state *state); +unsigned long unwind_get_return_address(struct unwind_state *state); + +static inline bool unwind_done(struct unwind_state *state) +{ + return state->stack_info.type == STACK_TYPE_UNKNOWN; +} + +static inline bool unwind_error(struct unwind_state *state) +{ + return state->error; +} + +#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1])) + +static inline unsigned long unwind_graph_addr(struct unwind_state *state, + unsigned long pc, unsigned long cfa) +{ + return ftrace_graph_ret_addr(state->task, &state->graph_idx, + pc, (unsigned long *)(cfa - GRAPH_FAKE_OFFSET)); +} + +static __always_inline void __unwind_start(struct unwind_state *state, + struct task_struct *task, struct pt_regs *regs) +{ + memset(state, 0, sizeof(*state)); + if (regs) { + state->sp = regs->regs[3]; + state->pc = regs->csr_era; + state->ra = regs->regs[1]; + } else if (task && task != current) { + state->sp = thread_saved_fp(task); + state->pc = thread_saved_ra(task); + state->ra = 0; + } else { + state->sp = (unsigned long)__builtin_frame_address(0); + state->pc = (unsigned long)__builtin_return_address(0); + state->ra = 0; + } + state->task = task; + get_stack_info(state->sp, state->task, &state->stack_info); + state->pc = unwind_graph_addr(state, state->pc, state->sp); +} + +static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state) +{ + return unwind_done(state) ? 
0 : state->pc; +} +#endif /* _ASM_UNWIND_H */ diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h new file mode 100644 index 0000000000..c8f59983f7 --- /dev/null +++ b/arch/loongarch/include/asm/uprobes.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_LOONGARCH_UPROBES_H +#define __ASM_LOONGARCH_UPROBES_H + +#include <asm/inst.h> + +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES 8 +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +#define UPROBE_SWBP_INSN larch_insn_gen_break(BRK_UPROBE_BP) +#define UPROBE_SWBP_INSN_SIZE LOONGARCH_INSN_SIZE + +#define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP) + +struct arch_uprobe { + unsigned long resume_era; + u32 insn[2]; + u32 ixol[2]; + bool simulate; +}; + +struct arch_uprobe_task { + unsigned long saved_trap_nr; +}; + +#ifdef CONFIG_UPROBES +bool uprobe_breakpoint_handler(struct pt_regs *regs); +bool uprobe_singlestep_handler(struct pt_regs *regs); +#else /* !CONFIG_UPROBES */ +static inline bool uprobe_breakpoint_handler(struct pt_regs *regs) { return false; } +static inline bool uprobe_singlestep_handler(struct pt_regs *regs) { return false; } +#endif /* CONFIG_UPROBES */ + +#endif /* __ASM_LOONGARCH_UPROBES_H */ diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h new file mode 100644 index 0000000000..d3ba35eb23 --- /dev/null +++ b/arch/loongarch/include/asm/vdso.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <vdso/datapage.h> + +#include <asm/barrier.h> + +/* + * struct loongarch_vdso_info - Details of a VDSO image. + * @vdso: Pointer to VDSO image (page-aligned). + * @size: Size of the VDSO image (page-aligned). + * @off_rt_sigreturn: Offset of the rt_sigreturn() trampoline. + * @code_mapping: Special mapping structure for vdso code. + * @code_mapping: Special mapping structure for vdso data. + * + * This structure contains details of a VDSO image, including the image data + * and offsets of certain symbols required by the kernel. It is generated as + * part of the VDSO build process, aside from the mapping page array, which is + * populated at runtime. 
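+ *
+ * In the structure below the trampoline offset is the @offset_sigreturn
+ * field, the vdso code is described by @code_mapping and the data pages
+ * by @data_mapping.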
+ */ +struct loongarch_vdso_info { + void *vdso; + unsigned long size; + unsigned long offset_sigreturn; + struct vm_special_mapping code_mapping; + struct vm_special_mapping data_mapping; +}; + +extern struct loongarch_vdso_info vdso_info; + +#endif /* __ASM_VDSO_H */ diff --git a/arch/loongarch/include/asm/vdso/clocksource.h b/arch/loongarch/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000..13cd580d40 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_CPU + +#endif /* __ASM_VDSOCLOCKSOURCE_H */ diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000..89e6b222c2 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include <asm/unistd.h> +#include <asm/vdso/vdso.h> + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline long gettimeofday_fallback( + struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register struct timezone *tz asm("a1") = _tz; + register long nr asm("a7") = __NR_gettimeofday; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (tv), "r" (tz) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline long clock_gettime_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long nr asm("a7") = __NR_clock_gettime; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (clkid), "r" (ts) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline int clock_getres_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long nr asm("a7") = __NR_clock_getres; + register long ret asm("a0"); + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (clkid), "r" (ts) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", + "$t8", "memory"); + + return ret; +} + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ + uint64_t count; + + __asm__ __volatile__( + " rdtime.d %0, $zero\n" + : "=r" (count)); + + return count; +} + +static inline bool loongarch_vdso_hres_capable(void) +{ + return true; +} +#define __arch_vdso_hres_capable loongarch_vdso_hres_capable + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return (const struct vdso_data *)get_vdso_data(); +} + +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) +{ + return (const struct vdso_data *)(get_vdso_data() + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE); +} +#endif +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H 
*/ diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h new file mode 100644 index 0000000000..ef5770b343 --- /dev/null +++ b/arch/loongarch/include/asm/vdso/processor.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#define cpu_relax() barrier() + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h new file mode 100644 index 0000000000..5a12309d9f --- /dev/null +++ b/arch/loongarch/include/asm/vdso/vdso.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASSEMBLY__ + +#include <asm/asm.h> +#include <asm/page.h> +#include <asm/vdso.h> + +struct vdso_pcpu_data { + u32 node; +} ____cacheline_aligned_in_smp; + +struct loongarch_vdso_data { + struct vdso_pcpu_data pdata[NR_CPUS]; +}; + +/* + * The layout of vvar: + * + * high + * +---------------------+--------------------------+ + * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE | + * +---------------------+--------------------------+ + * | time-ns vdso data | PAGE_SIZE | + * +---------------------+--------------------------+ + * | generic vdso data | PAGE_SIZE | + * +---------------------+--------------------------+ + * low + */ +#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data)) +#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT) + +enum vvar_pages { + VVAR_GENERIC_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_LOONGARCH_PAGES_START, + VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1, + VVAR_NR_PAGES, +}; + +#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT) + +static inline unsigned long get_vdso_base(void) +{ + unsigned long addr; + + __asm__( + " la.pcrel %0, _start\n" + : "=r" (addr) + : + :); + + return addr; +} + +static inline unsigned long get_vdso_data(void) +{ + return get_vdso_base() - VVAR_SIZE; +} + +#endif /* __ASSEMBLY__ */ diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h new file mode 100644 index 0000000000..5de615383a --- /dev/null +++ b/arch/loongarch/include/asm/vdso/vsyscall.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <linux/timekeeper_internal.h> +#include <vdso/datapage.h> + +extern struct vdso_data *vdso_data; + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. 
+ */ +static __always_inline +struct vdso_data *__loongarch_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/loongarch/include/asm/vermagic.h b/arch/loongarch/include/asm/vermagic.h new file mode 100644 index 0000000000..8b47ccfe3a --- /dev/null +++ b/arch/loongarch/include/asm/vermagic.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_VERMAGIC_H +#define _ASM_VERMAGIC_H + +#define MODULE_PROC_FAMILY "LOONGARCH " + +#ifdef CONFIG_32BIT +#define MODULE_KERNEL_TYPE "32BIT " +#elif defined CONFIG_64BIT +#define MODULE_KERNEL_TYPE "64BIT " +#endif + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE + +#endif /* _ASM_VERMAGIC_H */ diff --git a/arch/loongarch/include/asm/vmalloc.h b/arch/loongarch/include/asm/vmalloc.h new file mode 100644 index 0000000000..965a0d41ac --- /dev/null +++ b/arch/loongarch/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_LOONGARCH_VMALLOC_H +#define _ASM_LOONGARCH_VMALLOC_H + +#endif /* _ASM_LOONGARCH_VMALLOC_H */ diff --git a/arch/loongarch/include/asm/xor.h b/arch/loongarch/include/asm/xor.h new file mode 100644 index 0000000000..12467fffee --- /dev/null +++ b/arch/loongarch/include/asm/xor.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2023 WANG Xuerui <git@xen0n.name> + */ +#ifndef _ASM_LOONGARCH_XOR_H +#define _ASM_LOONGARCH_XOR_H + +#include <asm/cpu-features.h> +#include <asm/xor_simd.h> + +#ifdef CONFIG_CPU_HAS_LSX +static struct xor_block_template xor_block_lsx = { + .name = "lsx", + .do_2 = xor_lsx_2, + .do_3 = xor_lsx_3, + .do_4 = xor_lsx_4, + .do_5 = xor_lsx_5, +}; + +#define XOR_SPEED_LSX() \ + do { \ + if (cpu_has_lsx) \ + xor_speed(&xor_block_lsx); \ + } while (0) +#else /* CONFIG_CPU_HAS_LSX */ +#define XOR_SPEED_LSX() +#endif /* CONFIG_CPU_HAS_LSX */ + +#ifdef CONFIG_CPU_HAS_LASX +static struct xor_block_template xor_block_lasx = { + .name = "lasx", + .do_2 = xor_lasx_2, + .do_3 = xor_lasx_3, + .do_4 = xor_lasx_4, + .do_5 = xor_lasx_5, +}; + +#define XOR_SPEED_LASX() \ + do { \ + if (cpu_has_lasx) \ + xor_speed(&xor_block_lasx); \ + } while (0) +#else /* CONFIG_CPU_HAS_LASX */ +#define XOR_SPEED_LASX() +#endif /* CONFIG_CPU_HAS_LASX */ + +/* + * For grins, also test the generic routines. + * + * More importantly: it cannot be ruled out at this point of time, that some + * future (maybe reduced) models could run the vector algorithms slower than + * the scalar ones, maybe for errata or micro-op reasons. It may be + * appropriate to revisit this after one or two more uarch generations. 
+ */ +#include <asm-generic/xor.h> + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ +do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + XOR_SPEED_LSX(); \ + XOR_SPEED_LASX(); \ +} while (0) + +#endif /* _ASM_LOONGARCH_XOR_H */ diff --git a/arch/loongarch/include/asm/xor_simd.h b/arch/loongarch/include/asm/xor_simd.h new file mode 100644 index 0000000000..471b96332f --- /dev/null +++ b/arch/loongarch/include/asm/xor_simd.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2023 WANG Xuerui <git@xen0n.name> + */ +#ifndef _ASM_LOONGARCH_XOR_SIMD_H +#define _ASM_LOONGARCH_XOR_SIMD_H + +#ifdef CONFIG_CPU_HAS_LSX +void xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void xor_lsx_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3); +void xor_lsx_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void xor_lsx_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3, + const unsigned long * __restrict p4, const unsigned long * __restrict p5); +#endif /* CONFIG_CPU_HAS_LSX */ + +#ifdef CONFIG_CPU_HAS_LASX +void xor_lasx_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void xor_lasx_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3); +void xor_lasx_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void xor_lasx_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, const unsigned long * __restrict p3, + const unsigned long * __restrict p4, const unsigned long * __restrict p5); +#endif /* CONFIG_CPU_HAS_LASX */ + +#endif /* _ASM_LOONGARCH_XOR_SIMD_H */ diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild new file mode 100644 index 0000000000..4aa680ca2e --- /dev/null +++ b/arch/loongarch/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += kvm_para.h diff --git a/arch/loongarch/include/uapi/asm/auxvec.h b/arch/loongarch/include/uapi/asm/auxvec.h new file mode 100644 index 0000000000..922d9e6b50 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/auxvec.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_AUXVEC_H +#define __ASM_AUXVEC_H + +/* Location of VDSO image. 
*/ +#define AT_SYSINFO_EHDR 33 + +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + +#endif /* __ASM_AUXVEC_H */ diff --git a/arch/loongarch/include/uapi/asm/bpf_perf_event.h b/arch/loongarch/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 0000000000..eb6e2fd2a1 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include <linux/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/uapi/asm/break.h b/arch/loongarch/include/uapi/asm/break.h new file mode 100644 index 0000000000..bb9b82ba59 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/break.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef __UAPI_ASM_BREAK_H +#define __UAPI_ASM_BREAK_H + +#define BRK_DEFAULT 0 /* Used as default */ +#define BRK_BUG 1 /* Used by BUG() */ +#define BRK_KDB 2 /* Used in KDB_ENTER() */ +#define BRK_MATHEMU 3 /* Used by FPU emulator */ +#define BRK_USERBP 4 /* User bp (used by debuggers) */ +#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */ +#define BRK_OVERFLOW 6 /* Overflow check */ +#define BRK_DIVZERO 7 /* Divide by zero check */ +#define BRK_RANGE 8 /* Range error check */ +#define BRK_MULOVFL 9 /* Multiply overflow */ +#define BRK_KPROBE_BP 10 /* Kprobe break */ +#define BRK_KPROBE_SSTEPBP 11 /* Kprobe single step break */ +#define BRK_UPROBE_BP 12 /* See <asm/uprobes.h> */ +#define BRK_UPROBE_XOLBP 13 /* See <asm/uprobes.h> */ + +#endif /* __UAPI_ASM_BREAK_H */ diff --git a/arch/loongarch/include/uapi/asm/byteorder.h b/arch/loongarch/include/uapi/asm/byteorder.h new file mode 100644 index 0000000000..b1722d890d --- /dev/null +++ b/arch/loongarch/include/uapi/asm/byteorder.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BYTEORDER_H +#define _ASM_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* _ASM_BYTEORDER_H */ diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h new file mode 100644 index 0000000000..6955a7cb2c --- /dev/null +++ b/arch/loongarch/include/uapi/asm/hwcap.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_HWCAP_H +#define _UAPI_ASM_HWCAP_H + +/* HWCAP flags */ +#define HWCAP_LOONGARCH_CPUCFG (1 << 0) +#define HWCAP_LOONGARCH_LAM (1 << 1) +#define HWCAP_LOONGARCH_UAL (1 << 2) +#define HWCAP_LOONGARCH_FPU (1 << 3) +#define HWCAP_LOONGARCH_LSX (1 << 4) +#define HWCAP_LOONGARCH_LASX (1 << 5) +#define HWCAP_LOONGARCH_CRC32 (1 << 6) +#define HWCAP_LOONGARCH_COMPLEX (1 << 7) +#define HWCAP_LOONGARCH_CRYPTO (1 << 8) +#define HWCAP_LOONGARCH_LVZ (1 << 9) +#define HWCAP_LOONGARCH_LBT_X86 (1 << 10) +#define HWCAP_LOONGARCH_LBT_ARM (1 << 11) +#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) +#define HWCAP_LOONGARCH_PTW (1 << 13) + +#endif /* _UAPI_ASM_HWCAP_H */ diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h new file mode 100644 index 0000000000..29d69c00fc --- /dev/null +++ 
b/arch/loongarch/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_LOONGARCH_PERF_REGS_H +#define _ASM_LOONGARCH_PERF_REGS_H + +enum perf_event_loongarch_regs { + PERF_REG_LOONGARCH_PC, + PERF_REG_LOONGARCH_R1, + PERF_REG_LOONGARCH_R2, + PERF_REG_LOONGARCH_R3, + PERF_REG_LOONGARCH_R4, + PERF_REG_LOONGARCH_R5, + PERF_REG_LOONGARCH_R6, + PERF_REG_LOONGARCH_R7, + PERF_REG_LOONGARCH_R8, + PERF_REG_LOONGARCH_R9, + PERF_REG_LOONGARCH_R10, + PERF_REG_LOONGARCH_R11, + PERF_REG_LOONGARCH_R12, + PERF_REG_LOONGARCH_R13, + PERF_REG_LOONGARCH_R14, + PERF_REG_LOONGARCH_R15, + PERF_REG_LOONGARCH_R16, + PERF_REG_LOONGARCH_R17, + PERF_REG_LOONGARCH_R18, + PERF_REG_LOONGARCH_R19, + PERF_REG_LOONGARCH_R20, + PERF_REG_LOONGARCH_R21, + PERF_REG_LOONGARCH_R22, + PERF_REG_LOONGARCH_R23, + PERF_REG_LOONGARCH_R24, + PERF_REG_LOONGARCH_R25, + PERF_REG_LOONGARCH_R26, + PERF_REG_LOONGARCH_R27, + PERF_REG_LOONGARCH_R28, + PERF_REG_LOONGARCH_R29, + PERF_REG_LOONGARCH_R30, + PERF_REG_LOONGARCH_R31, + PERF_REG_LOONGARCH_MAX, +}; +#endif /* _ASM_LOONGARCH_PERF_REGS_H */ diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h new file mode 100644 index 0000000000..ac915f8416 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _UAPI_ASM_PTRACE_H +#define _UAPI_ASM_PTRACE_H + +#include <linux/types.h> + +#ifndef __KERNEL__ +#include <stdint.h> +#endif + +/* + * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs, + * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR. + */ +#define GPR_BASE 0 +#define GPR_NUM 32 +#define GPR_END (GPR_BASE + GPR_NUM - 1) +#define ARG0 (GPR_END + 1) +#define PC (GPR_END + 2) +#define BADVADDR (GPR_END + 3) + +#define NUM_FPU_REGS 32 + +struct user_pt_regs { + /* Main processor registers. */ + unsigned long regs[32]; + + /* Original syscall arg0. */ + unsigned long orig_a0; + + /* Special CSR registers. */ + unsigned long csr_era; + unsigned long csr_badv; + unsigned long reserved[10]; +} __attribute__((aligned(8))); + +struct user_fp_state { + uint64_t fpr[32]; + uint64_t fcc; + uint32_t fcsr; +}; + +struct user_lsx_state { + /* 32 registers, 128 bits width per register. */ + uint64_t vregs[32*2]; +}; + +struct user_lasx_state { + /* 32 registers, 256 bits width per register. */ + uint64_t vregs[32*4]; +}; + +struct user_lbt_state { + uint64_t scr[4]; + uint32_t eflags; + uint32_t ftop; +}; + +struct user_watch_state { + uint64_t dbg_info; + struct { + uint64_t addr; + uint64_t mask; + uint32_t ctrl; + uint32_t pad; + } dbg_regs[8]; +}; + +#define PTRACE_SYSEMU 0x1f +#define PTRACE_SYSEMU_SINGLESTEP 0x20 + +#endif /* _UAPI_ASM_PTRACE_H */ diff --git a/arch/loongarch/include/uapi/asm/reg.h b/arch/loongarch/include/uapi/asm/reg.h new file mode 100644 index 0000000000..90ad910c60 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/reg.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Various register offset definitions for debuggers, core file + * examiners and whatnot. 
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_REG_H +#define __UAPI_ASM_LOONGARCH_REG_H + +#define LOONGARCH_EF_R0 0 +#define LOONGARCH_EF_R1 1 +#define LOONGARCH_EF_R2 2 +#define LOONGARCH_EF_R3 3 +#define LOONGARCH_EF_R4 4 +#define LOONGARCH_EF_R5 5 +#define LOONGARCH_EF_R6 6 +#define LOONGARCH_EF_R7 7 +#define LOONGARCH_EF_R8 8 +#define LOONGARCH_EF_R9 9 +#define LOONGARCH_EF_R10 10 +#define LOONGARCH_EF_R11 11 +#define LOONGARCH_EF_R12 12 +#define LOONGARCH_EF_R13 13 +#define LOONGARCH_EF_R14 14 +#define LOONGARCH_EF_R15 15 +#define LOONGARCH_EF_R16 16 +#define LOONGARCH_EF_R17 17 +#define LOONGARCH_EF_R18 18 +#define LOONGARCH_EF_R19 19 +#define LOONGARCH_EF_R20 20 +#define LOONGARCH_EF_R21 21 +#define LOONGARCH_EF_R22 22 +#define LOONGARCH_EF_R23 23 +#define LOONGARCH_EF_R24 24 +#define LOONGARCH_EF_R25 25 +#define LOONGARCH_EF_R26 26 +#define LOONGARCH_EF_R27 27 +#define LOONGARCH_EF_R28 28 +#define LOONGARCH_EF_R29 29 +#define LOONGARCH_EF_R30 30 +#define LOONGARCH_EF_R31 31 + +/* + * Saved special registers + */ +#define LOONGARCH_EF_ORIG_A0 32 +#define LOONGARCH_EF_CSR_ERA 33 +#define LOONGARCH_EF_CSR_BADV 34 +#define LOONGARCH_EF_CSR_CRMD 35 +#define LOONGARCH_EF_CSR_PRMD 36 +#define LOONGARCH_EF_CSR_EUEN 37 +#define LOONGARCH_EF_CSR_ECFG 38 +#define LOONGARCH_EF_CSR_ESTAT 39 + +#define LOONGARCH_EF_SIZE 320 /* size in bytes */ + +#endif /* __UAPI_ASM_LOONGARCH_REG_H */ diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h new file mode 100644 index 0000000000..6c22f616b8 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/sigcontext.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Author: Hanlu Li <lihanlu@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _UAPI_ASM_SIGCONTEXT_H +#define _UAPI_ASM_SIGCONTEXT_H + +#include <linux/types.h> +#include <linux/posix_types.h> + +/* FP context was used */ +#define SC_USED_FP (1 << 0) +/* Address error was due to memory load */ +#define SC_ADDRERR_RD (1 << 30) +/* Address error was due to memory store */ +#define SC_ADDRERR_WR (1 << 31) + +struct sigcontext { + __u64 sc_pc; + __u64 sc_regs[32]; + __u32 sc_flags; + __u64 sc_extcontext[0] __attribute__((__aligned__(16))); +}; + +#define CONTEXT_INFO_ALIGN 16 +struct sctx_info { + __u32 magic; + __u32 size; + __u64 padding; /* padding to 16 bytes */ +}; + +/* FPU context */ +#define FPU_CTX_MAGIC 0x46505501 +#define FPU_CTX_ALIGN 8 +struct fpu_context { + __u64 regs[32]; + __u64 fcc; + __u32 fcsr; +}; + +/* LSX context */ +#define LSX_CTX_MAGIC 0x53580001 +#define LSX_CTX_ALIGN 16 +struct lsx_context { + __u64 regs[2*32]; + __u64 fcc; + __u32 fcsr; +}; + +/* LASX context */ +#define LASX_CTX_MAGIC 0x41535801 +#define LASX_CTX_ALIGN 32 +struct lasx_context { + __u64 regs[4*32]; + __u64 fcc; + __u32 fcsr; +}; + +/* LBT context */ +#define LBT_CTX_MAGIC 0x42540001 +#define LBT_CTX_ALIGN 8 +struct lbt_context { + __u64 regs[4]; + __u32 eflags; + __u32 ftop; +}; + + +#endif /* _UAPI_ASM_SIGCONTEXT_H */ diff --git a/arch/loongarch/include/uapi/asm/signal.h b/arch/loongarch/include/uapi/asm/signal.h new file mode 100644 index 0000000000..992d965aa1 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/signal.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation 
Limited + */ +#ifndef _UAPI_ASM_SIGNAL_H +#define _UAPI_ASM_SIGNAL_H + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#include <asm-generic/signal.h> + +#endif diff --git a/arch/loongarch/include/uapi/asm/ucontext.h b/arch/loongarch/include/uapi/asm/ucontext.h new file mode 100644 index 0000000000..12577e22b1 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/ucontext.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __LOONGARCH_UAPI_ASM_UCONTEXT_H +#define __LOONGARCH_UAPI_ASM_UCONTEXT_H + +/** + * struct ucontext - user context structure + * @uc_flags: + * @uc_link: + * @uc_stack: + * @uc_mcontext: holds basic processor state + * @uc_sigmask: + * @uc_extcontext: holds extended processor state + */ +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + /* There's some padding here to allow sigset_t to be expanded in the + * future. Though this is unlikely, other architectures put uc_sigmask + * at the end of this structure and explicitly state it can be + * expanded, so we didn't want to box ourselves in here. */ + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; + /* We can't put uc_sigmask at the end of this structure because we need + * to be able to expand sigcontext in the future. For example, the + * vector ISA extension will almost certainly add ISA state. We want + * to ensure all user-visible ISA state can be saved and restored via a + * ucontext, so we're putting this at the end in order to allow for + * infinite extensibility. Since we know this will be extended and we + * assume sigset_t won't be extended an extreme amount, we're + * prioritizing this. */ + struct sigcontext uc_mcontext; +}; + +#endif /* __LOONGARCH_UAPI_ASM_UCONTEXT_H */ diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h new file mode 100644 index 0000000000..fcb668984f --- /dev/null +++ b/arch/loongarch/include/uapi/asm/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#include <asm-generic/unistd.h> |
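
A few usage notes on the userspace-visible pieces added above. First, the vDSO's __arch_get_hw_counter() in asm/vdso/gettimeofday.h samples LoongArch's constant-frequency stable counter with rdtime.d; the same instruction is available to unprivileged code, so a raw counter read outside the vDSO looks like the following sketch (obtaining the counter frequency, e.g. from CPUCFG, is not shown):

#include <stdint.h>

/*
 * Read LoongArch's constant-frequency stable counter, mirroring what
 * __arch_get_hw_counter() does in the vDSO above.
 */
static inline uint64_t read_stable_counter(void)
{
        uint64_t count;

        __asm__ __volatile__(
                "       rdtime.d %0, $zero\n"
                : "=r" (count));

        return count;
}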
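
Second, the HWCAP_LOONGARCH_* bits from uapi/asm/hwcap.h are reported to userspace through the ELF auxiliary vector, so feature detection reduces to a single getauxval() call. A minimal sketch, with the bit values copied from the header above purely for illustration (real code would simply include the header):

#include <stdio.h>
#include <sys/auxv.h>

/* Bit values mirror uapi/asm/hwcap.h above. */
#define HWCAP_LOONGARCH_FPU     (1 << 3)
#define HWCAP_LOONGARCH_LSX     (1 << 4)
#define HWCAP_LOONGARCH_LASX    (1 << 5)

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("FPU:  %s\n", (hwcap & HWCAP_LOONGARCH_FPU)  ? "yes" : "no");
        printf("LSX:  %s\n", (hwcap & HWCAP_LOONGARCH_LSX)  ? "yes" : "no");
        printf("LASX: %s\n", (hwcap & HWCAP_LOONGARCH_LASX) ? "yes" : "no");
        return 0;
}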
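
Third, the struct user_pt_regs layout from uapi/asm/ptrace.h is what debuggers exchange with the kernel through the regset interface. A hedged sketch of reading a stopped tracee's general-purpose registers with PTRACE_GETREGSET and NT_PRSTATUS (attach/stop sequencing and error reporting are omitted, and dump_gprs is an illustrative name, not an existing helper):

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_PRSTATUS */
#include <asm/ptrace.h>         /* struct user_pt_regs */

/* Dump a few registers of a tracee that is already attached and stopped. */
static int dump_gprs(pid_t pid)
{
        struct user_pt_regs regs;
        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1)
                return -1;

        /* csr_era holds the PC; r3 is the stack pointer, r4 is a0. */
        printf("pc = %#lx, sp (r3) = %#lx, a0 (r4) = %#lx\n",
               regs.csr_era, regs.regs[3], regs.regs[4]);
        return 0;
}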
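
Finally, the comment in uapi/asm/ucontext.h explains why uc_mcontext is placed last: extended ISA state (FPU, LSX, LASX, LBT) is appended behind struct sigcontext as a chain of records, each introduced by a struct sctx_info carrying a magic and a total size. The sketch below walks that chain the way the kernel's signal code is expected to, under the assumption that a zero magic terminates the chain; treat it as illustrative and verify against arch/loongarch/kernel/signal.c:

#include <stddef.h>
#include <asm/sigcontext.h>     /* struct sctx_info, struct lasx_context, LASX_CTX_MAGIC */

/*
 * Walk the extended-context records stored after struct sigcontext and
 * return the LASX block, if the kernel saved one in this signal frame.
 * Each record is a struct sctx_info header followed by its payload;
 * info->size covers the whole record, and a zero magic ends the chain
 * (hedged assumption).
 */
static struct lasx_context *find_lasx_context(struct sigcontext *sc)
{
        struct sctx_info *info = (struct sctx_info *)sc->sc_extcontext;

        while (info->magic != 0) {
                if (info->magic == LASX_CTX_MAGIC)
                        return (struct lasx_context *)(info + 1);
                info = (struct sctx_info *)((char *)info + info->size);
        }

        return NULL;
}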