author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch) | |
tree | a94efe259b9009378be6d90eb30d2b019d95c194 | /arch/nds32/include |
parent | Initial commit. (diff) | |
download | linux-upstream.tar.xz, linux-upstream.zip | |
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/nds32/include')
60 files changed, 4718 insertions, 0 deletions
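Nearly every definition in the asm/bitfield.h hunk below follows a single convention: each register field gets a `*_off*` constant giving its bit position and a `*_msk*` constant giving its pre-shifted mask. As a quick orientation before the wall of definitions, here is a minimal, self-contained sketch of how such a pair is used. It is not part of the commit: the `CPU_VER` constants mirror the cr0 section of the patch (with `UL` added so the shifts are well defined in a standalone program; the header uses plain `0xFF`), and the raw register value is a made-up sample that on hardware would come from `mfsr`.

```c
/* Sketch of the off/msk idiom used throughout asm/bitfield.h. */
#include <stdio.h>

#define CPU_VER_offREV   16
#define CPU_VER_offCPUID 24
#define CPU_VER_mskREV   (0xFFUL << CPU_VER_offREV)
#define CPU_VER_mskCPUID (0xFFUL << CPU_VER_offCPUID)

int main(void)
{
	unsigned long cpu_ver = 0x0c010123UL;	/* hypothetical raw cr0 value */

	/* Extract a field: mask it in place, then shift it down to bit 0. */
	unsigned long cpuid = (cpu_ver & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
	unsigned long rev = (cpu_ver & CPU_VER_mskREV) >> CPU_VER_offREV;

	/* Update a field: clear it with the mask, then OR in the new value. */
	cpu_ver = (cpu_ver & ~CPU_VER_mskREV) | (0x02UL << CPU_VER_offREV);

	printf("CPUID=0x%02lx REV=0x%02lx new cr0=0x%08lx\n", cpuid, rev, cpu_ver);
	return 0;
}
```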
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild new file mode 100644 index 000000000..82a4453c9 --- /dev/null +++ b/arch/nds32/include/asm/Kbuild @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += asm-offsets.h +generic-y += cmpxchg.h +generic-y += export.h +generic-y += gpio.h +generic-y += kvm_para.h +generic-y += parport.h +generic-y += user.h diff --git a/arch/nds32/include/asm/assembler.h b/arch/nds32/include/asm/assembler.h new file mode 100644 index 000000000..5e7c56926 --- /dev/null +++ b/arch/nds32/include/asm/assembler.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_ASSEMBLER_H__ +#define __NDS32_ASSEMBLER_H__ + +.macro gie_disable + setgie.d + dsb +.endm + +.macro gie_enable + setgie.e + dsb +.endm + +.macro gie_save oldpsw + mfsr \oldpsw, $ir0 + setgie.d + dsb +.endm + +.macro gie_restore oldpsw + andi \oldpsw, \oldpsw, #0x1 + beqz \oldpsw, 7001f + setgie.e + dsb +7001: +.endm + + +#define USER(insn, reg, addr, opr) \ +9999: insn reg, addr, opr; \ + .section __ex_table,"a"; \ + .align 3; \ + .long 9999b, 9001f; \ + .previous + +#endif /* __NDS32_ASSEMBLER_H__ */ diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h new file mode 100644 index 000000000..16413172f --- /dev/null +++ b/arch/nds32/include/asm/barrier.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_ASM_BARRIER_H +#define __NDS32_ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#define mb() asm volatile("msync all":::"memory") +#define rmb() asm volatile("msync all":::"memory") +#define wmb() asm volatile("msync store":::"memory") +#include <asm-generic/barrier.h> + +#endif /* __ASSEMBLY__ */ + +#endif /* __NDS32_ASM_BARRIER_H */ diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h new file mode 100644 index 000000000..b02a58e71 --- /dev/null +++ b/arch/nds32/include/asm/bitfield.h @@ -0,0 +1,985 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_BITFIELD_H__ +#define __NDS32_BITFIELD_H__ +/****************************************************************************** + * cr0: CPU_VER (CPU Version Register) + *****************************************************************************/ +#define CPU_VER_offCFGID 0 /* Minor configuration */ +#define CPU_VER_offREV 16 /* Revision of the CPU version */ +#define CPU_VER_offCPUID 24 /* Major CPU versions */ + +#define CPU_VER_mskCFGID ( 0xFFFF << CPU_VER_offCFGID ) +#define CPU_VER_mskREV ( 0xFF << CPU_VER_offREV ) +#define CPU_VER_mskCPUID ( 0xFF << CPU_VER_offCPUID ) + +/****************************************************************************** + * cr1: ICM_CFG (Instruction Cache/Memory Configuration Register) + *****************************************************************************/ +#define ICM_CFG_offISET 0 /* I-cache sets (# of cache lines) per way */ +#define ICM_CFG_offIWAY 3 /* I-cache ways */ +#define ICM_CFG_offISZ 6 /* I-cache line size */ +#define ICM_CFG_offILCK 9 /* I-cache locking support */ +#define ICM_CFG_offILMB 10 /* On-chip ILM banks */ +#define ICM_CFG_offBSAV 13 /* ILM base register alignment version */ +/* bit 15:31 reserved */ + +#define ICM_CFG_mskISET ( 0x7 << ICM_CFG_offISET ) +#define ICM_CFG_mskIWAY ( 0x7 << ICM_CFG_offIWAY ) +#define ICM_CFG_mskISZ ( 0x7 << ICM_CFG_offISZ ) +#define ICM_CFG_mskILCK ( 0x1 << 
ICM_CFG_offILCK ) +#define ICM_CFG_mskILMB ( 0x7 << ICM_CFG_offILMB ) +#define ICM_CFG_mskBSAV ( 0x3 << ICM_CFG_offBSAV ) + +/****************************************************************************** + * cr2: DCM_CFG (Data Cache/Memory Configuration Register) + *****************************************************************************/ +#define DCM_CFG_offDSET 0 /* D-cache sets (# of cache lines) per way */ +#define DCM_CFG_offDWAY 3 /* D-cache ways */ +#define DCM_CFG_offDSZ 6 /* D-cache line size */ +#define DCM_CFG_offDLCK 9 /* D-cache locking support */ +#define DCM_CFG_offDLMB 10 /* On-chip DLM banks */ +#define DCM_CFG_offBSAV 13 /* DLM base register alignment version */ +/* bit 15:31 reserved */ + +#define DCM_CFG_mskDSET ( 0x7 << DCM_CFG_offDSET ) +#define DCM_CFG_mskDWAY ( 0x7 << DCM_CFG_offDWAY ) +#define DCM_CFG_mskDSZ ( 0x7 << DCM_CFG_offDSZ ) +#define DCM_CFG_mskDLCK ( 0x1 << DCM_CFG_offDLCK ) +#define DCM_CFG_mskDLMB ( 0x7 << DCM_CFG_offDLMB ) +#define DCM_CFG_mskBSAV ( 0x3 << DCM_CFG_offBSAV ) + +/****************************************************************************** + * cr3: MMU_CFG (MMU Configuration Register) + *****************************************************************************/ +#define MMU_CFG_offMMPS 0 /* Memory management protection scheme */ +#define MMU_CFG_offMMPV 2 /* Memory management protection version number */ +#define MMU_CFG_offFATB 7 /* Fully-associative or non-fully-associative TLB */ + +#define MMU_CFG_offTBW 8 /* TLB ways(non-associative) TBS */ +#define MMU_CFG_offTBS 11 /* TLB sets per way(non-associative) TBS */ +/* bit 14:14 reserved */ + +#define MMU_CFG_offEP8MIN4 15 /* 8KB page supported while minimum page is 4KB */ +#define MMU_CFG_offfEPSZ 16 /* Extra page size supported */ +#define MMU_CFG_offTLBLCK 24 /* TLB locking support */ +#define MMU_CFG_offHPTWK 25 /* Hardware Page Table Walker implemented */ +#define MMU_CFG_offDE 26 /* Default endian */ +#define MMU_CFG_offNTPT 27 /* Partitions for non-translated attributes */ +#define MMU_CFG_offIVTB 28 /* Invisible TLB */ +#define MMU_CFG_offVLPT 29 /* VLPT for fast TLB fill handling implemented */ +#define MMU_CFG_offNTME 30 /* Non-translated VA to PA mapping */ +/* bit 31 reserved */ + +#define MMU_CFG_mskMMPS ( 0x3 << MMU_CFG_offMMPS ) +#define MMU_CFG_mskMMPV ( 0x1F << MMU_CFG_offMMPV ) +#define MMU_CFG_mskFATB ( 0x1 << MMU_CFG_offFATB ) +#define MMU_CFG_mskTBW ( 0x7 << MMU_CFG_offTBW ) +#define MMU_CFG_mskTBS ( 0x7 << MMU_CFG_offTBS ) +#define MMU_CFG_mskEP8MIN4 ( 0x1 << MMU_CFG_offEP8MIN4 ) +#define MMU_CFG_mskfEPSZ ( 0xFF << MMU_CFG_offfEPSZ ) +#define MMU_CFG_mskTLBLCK ( 0x1 << MMU_CFG_offTLBLCK ) +#define MMU_CFG_mskHPTWK ( 0x1 << MMU_CFG_offHPTWK ) +#define MMU_CFG_mskDE ( 0x1 << MMU_CFG_offDE ) +#define MMU_CFG_mskNTPT ( 0x1 << MMU_CFG_offNTPT ) +#define MMU_CFG_mskIVTB ( 0x1 << MMU_CFG_offIVTB ) +#define MMU_CFG_mskVLPT ( 0x1 << MMU_CFG_offVLPT ) +#define MMU_CFG_mskNTME ( 0x1 << MMU_CFG_offNTME ) + +/****************************************************************************** + * cr4: MSC_CFG (Misc Configuration Register) + *****************************************************************************/ +#define MSC_CFG_offEDM 0 +#define MSC_CFG_offLMDMA 1 +#define MSC_CFG_offPFM 2 +#define MSC_CFG_offHSMP 3 +#define MSC_CFG_offTRACE 4 +#define MSC_CFG_offDIV 5 +#define MSC_CFG_offMAC 6 +#define MSC_CFG_offAUDIO 7 +#define MSC_CFG_offL2C 9 +#define MSC_CFG_offRDREG 10 +#define MSC_CFG_offADR24 11 +#define MSC_CFG_offINTLC 12 +#define MSC_CFG_offBASEV 13 
+#define MSC_CFG_offNOD 16 +/* bit 13:31 reserved */ + +#define MSC_CFG_mskEDM ( 0x1 << MSC_CFG_offEDM ) +#define MSC_CFG_mskLMDMA ( 0x1 << MSC_CFG_offLMDMA ) +#define MSC_CFG_mskPFM ( 0x1 << MSC_CFG_offPFM ) +#define MSC_CFG_mskHSMP ( 0x1 << MSC_CFG_offHSMP ) +#define MSC_CFG_mskTRACE ( 0x1 << MSC_CFG_offTRACE ) +#define MSC_CFG_mskDIV ( 0x1 << MSC_CFG_offDIV ) +#define MSC_CFG_mskMAC ( 0x1 << MSC_CFG_offMAC ) +#define MSC_CFG_mskAUDIO ( 0x3 << MSC_CFG_offAUDIO ) +#define MSC_CFG_mskL2C ( 0x1 << MSC_CFG_offL2C ) +#define MSC_CFG_mskRDREG ( 0x1 << MSC_CFG_offRDREG ) +#define MSC_CFG_mskADR24 ( 0x1 << MSC_CFG_offADR24 ) +#define MSC_CFG_mskINTLC ( 0x1 << MSC_CFG_offINTLC ) +#define MSC_CFG_mskBASEV ( 0x7 << MSC_CFG_offBASEV ) +#define MSC_CFG_mskNOD ( 0x1 << MSC_CFG_offNOD ) + +/****************************************************************************** + * cr5: CORE_CFG (Core Identification Register) + *****************************************************************************/ +#define CORE_ID_offCOREID 0 +/* bit 4:31 reserved */ + +#define CORE_ID_mskCOREID ( 0xF << CORE_ID_offCOREID ) + +/****************************************************************************** + * cr6: FUCOP_EXIST (FPU and Coprocessor Existence Configuration Register) + *****************************************************************************/ +#define FUCOP_EXIST_offCP0EX 0 +#define FUCOP_EXIST_offCP1EX 1 +#define FUCOP_EXIST_offCP2EX 2 +#define FUCOP_EXIST_offCP3EX 3 +#define FUCOP_EXIST_offCP0ISFPU 31 + +#define FUCOP_EXIST_mskCP0EX ( 0x1 << FUCOP_EXIST_offCP0EX ) +#define FUCOP_EXIST_mskCP1EX ( 0x1 << FUCOP_EXIST_offCP1EX ) +#define FUCOP_EXIST_mskCP2EX ( 0x1 << FUCOP_EXIST_offCP2EX ) +#define FUCOP_EXIST_mskCP3EX ( 0x1 << FUCOP_EXIST_offCP3EX ) +#define FUCOP_EXIST_mskCP0ISFPU ( 0x1 << FUCOP_EXIST_offCP0ISFPU ) + +/****************************************************************************** + * ir0: PSW (Processor Status Word Register) + * ir1: IPSW (Interruption PSW Register) + * ir2: P_IPSW (Previous IPSW Register) + *****************************************************************************/ +#define PSW_offGIE 0 /* Global Interrupt Enable */ +#define PSW_offINTL 1 /* Interruption Stack Level */ +#define PSW_offPOM 3 /* Processor Operation Mode, User/Superuser */ +#define PSW_offBE 5 /* Endianness for data memory access, 1:MSB, 0:LSB */ +#define PSW_offIT 6 /* Enable instruction address translation */ +#define PSW_offDT 7 /* Enable data address translation */ +#define PSW_offIME 8 /* Instruction Machine Error flag */ +#define PSW_offDME 9 /* Data Machine Error flag */ +#define PSW_offDEX 10 /* Debug Exception */ +#define PSW_offHSS 11 /* Hardware Single Stepping */ +#define PSW_offDRBE 12 /* Device Register Endian Mode */ +#define PSW_offAEN 13 /* Audio ISA special feature */ +#define PSW_offWBNA 14 /* Write Back Non-Allocate */ +#define PSW_offIFCON 15 /* IFC On */ +#define PSW_offCPL 16 /* Current Priority Level */ +/* bit 19:31 reserved */ + +#define PSW_mskGIE ( 0x1 << PSW_offGIE ) +#define PSW_mskINTL ( 0x3 << PSW_offINTL ) +#define PSW_mskPOM ( 0x3 << PSW_offPOM ) +#define PSW_mskBE ( 0x1 << PSW_offBE ) +#define PSW_mskIT ( 0x1 << PSW_offIT ) +#define PSW_mskDT ( 0x1 << PSW_offDT ) +#define PSW_mskIME ( 0x1 << PSW_offIME ) +#define PSW_mskDME ( 0x1 << PSW_offDME ) +#define PSW_mskDEX ( 0x1 << PSW_offDEX ) +#define PSW_mskHSS ( 0x1 << PSW_offHSS ) +#define PSW_mskDRBE ( 0x1 << PSW_offDRBE ) +#define PSW_mskAEN ( 0x1 << PSW_offAEN ) +#define PSW_mskWBNA ( 0x1 << PSW_offWBNA ) +#define 
PSW_mskIFCON ( 0x1 << PSW_offIFCON ) +#define PSW_mskCPL ( 0x7 << PSW_offCPL ) + +#define PSW_SYSTEM ( 1 << PSW_offPOM ) +#define PSW_INTL_1 ( 1 << PSW_offINTL ) +#define PSW_CPL_NO ( 0 << PSW_offCPL ) +#define PSW_CPL_ANY ( 7 << PSW_offCPL ) + +#define PSW_clr (PSW_mskGIE|PSW_mskINTL|PSW_mskPOM|PSW_mskIT|PSW_mskDT|PSW_mskIME|PSW_mskWBNA) +#ifdef __NDS32_EB__ +#ifdef CONFIG_WBNA +#define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE) +#else +#define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE) +#endif +#else +#ifdef CONFIG_WBNA +#define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT) +#else +#define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT) +#endif +#endif +/****************************************************************************** + * ir3: IVB (Interruption Vector Base Register) + *****************************************************************************/ +/* bit 0:12 reserved */ +#define IVB_offNIVIC 1 /* Number of input for IVIC Controller */ +#define IVB_offIVIC_VER 11 /* IVIC Version */ +#define IVB_offEVIC 13 /* External Vector Interrupt Controller mode */ +#define IVB_offESZ 14 /* Size of each vector entry */ +#define IVB_offIVBASE 16 /* BasePA of interrupt vector table */ + +#define IVB_mskNIVIC ( 0x7 << IVB_offNIVIC ) +#define IVB_mskIVIC_VER ( 0x3 << IVB_offIVIC_VER ) +#define IVB_mskEVIC ( 0x1 << IVB_offEVIC ) +#define IVB_mskESZ ( 0x3 << IVB_offESZ ) +#define IVB_mskIVBASE ( 0xFFFF << IVB_offIVBASE ) + +#define IVB_valESZ4 0 +#define IVB_valESZ16 1 +#define IVB_valESZ64 2 +#define IVB_valESZ256 3 +/****************************************************************************** + * ir4: EVA (Exception Virtual Address Register) + * ir5: P_EVA (Previous EVA Register) + *****************************************************************************/ + + /* This register contains the VA that causes the exception */ + +/****************************************************************************** + * ir6: ITYPE (Interruption Type Register) + * ir7: P_ITYPE (Previous ITYPE Register) + *****************************************************************************/ +#define ITYPE_offETYPE 0 /* Exception Type */ +#define ITYPE_offINST 4 /* Exception caused by insn fetch or data access */ +/* bit 5:15 reserved */ +#define ITYPE_offVECTOR 5 /* Vector */ +#define ITYPE_offSWID 16 /* SWID of debugging exception */ +/* bit 31:31 reserved */ + +#define ITYPE_mskETYPE ( 0xF << ITYPE_offETYPE ) +#define ITYPE_mskINST ( 0x1 << ITYPE_offINST ) +#define ITYPE_mskVECTOR ( 0x7F << ITYPE_offVECTOR ) +#define ITYPE_mskSWID ( 0x7FFF << ITYPE_offSWID ) + +/* Additional definitions for ITYPE register */ +#define ITYPE_offSTYPE 16 /* Arithmetic Sub Type */ +#define ITYPE_offCPID 20 /* Co-Processor ID which generate the exception */ + +#define ITYPE_mskSTYPE ( 0xF << ITYPE_offSTYPE ) +#define ITYPE_mskCPID ( 0x3 << ITYPE_offCPID ) + +/* Additional definitions of ITYPE register for FPU */ +#define FPU_DISABLE_EXCEPTION (0x1 << ITYPE_offSTYPE) +#define FPU_EXCEPTION (0x2 << ITYPE_offSTYPE) +#define FPU_CPID 0 /* FPU Co-Processor ID is 0 */ + +#define NDS32_VECTOR_mskNONEXCEPTION 0x78 +#define NDS32_VECTOR_offEXCEPTION 8 +#define NDS32_VECTOR_offINTERRUPT 9 + +/* Interrupt vector entry */ +#define ENTRY_RESET_NMI 0 +#define ENTRY_TLB_FILL 1 +#define ENTRY_PTE_NOT_PRESENT 2 +#define ENTRY_TLB_MISC 3 +#define ENTRY_TLB_VLPT_MISS 4 +#define ENTRY_MACHINE_ERROR 5 +#define ENTRY_DEBUG_RELATED 6 
+#define ENTRY_GENERAL_EXCPETION 7 +#define ENTRY_SYSCALL 8 + +/* PTE not present exception definition */ +#define ETYPE_NON_LEAF_PTE_NOT_PRESENT 0 +#define ETYPE_LEAF_PTE_NOT_PRESENT 1 + +/* General exception ETYPE definition */ +#define ETYPE_ALIGNMENT_CHECK 0 +#define ETYPE_RESERVED_INSTRUCTION 1 +#define ETYPE_TRAP 2 +#define ETYPE_ARITHMETIC 3 +#define ETYPE_PRECISE_BUS_ERROR 4 +#define ETYPE_IMPRECISE_BUS_ERROR 5 +#define ETYPE_COPROCESSOR 6 +#define ETYPE_RESERVED_VALUE 7 +#define ETYPE_NONEXISTENT_MEM_ADDRESS 8 +#define ETYPE_MPZIU_CONTROL 9 +#define ETYPE_NEXT_PRECISE_STACK_OFL 10 + +/* Kernel reserved software ID */ +#define SWID_RAISE_INTERRUPT_LEVEL 0x1a /* SWID_RAISE_INTERRUPT_LEVEL is used to + * raise the interrupt level for a debug exception + */ + +/****************************************************************************** + * ir8: MERR (Machine Error Log Register) + *****************************************************************************/ +/* bit 0:30 reserved */ +#define MERR_offBUSERR 31 /* Bus error caused by a load insn */ + +#define MERR_mskBUSERR ( 0x1 << MERR_offBUSERR ) + +/****************************************************************************** + * ir9: IPC (Interruption Program Counter Register) + * ir10: P_IPC (Previous IPC Register) + * ir11: OIPC (Overflow Interruption Program Counter Register) + *****************************************************************************/ + + /* This is the shadow stack register of the Program Counter */ + +/****************************************************************************** + * ir12: P_P0 (Previous P0 Register) + * ir13: P_P1 (Previous P1 Register) + *****************************************************************************/ + + /* These are shadow registers of $p0 and $p1 */ + +/****************************************************************************** + * ir14: INT_MASK (Interruption Masking Register) + *****************************************************************************/ +#define INT_MASK_offH0IM 0 /* Hardware Interrupt 0 Mask bit */ +#define INT_MASK_offH1IM 1 /* Hardware Interrupt 1 Mask bit */ +#define INT_MASK_offH2IM 2 /* Hardware Interrupt 2 Mask bit */ +#define INT_MASK_offH3IM 3 /* Hardware Interrupt 3 Mask bit */ +#define INT_MASK_offH4IM 4 /* Hardware Interrupt 4 Mask bit */ +#define INT_MASK_offH5IM 5 /* Hardware Interrupt 5 Mask bit */ +/* bit 6:15 reserved */ +#define INT_MASK_offSIM 16 /* Software Interrupt Mask bit */ +/* bit 17:29 reserved */ +#define INT_MASK_offIDIVZE 30 /* Enable detection for Divide-By-Zero */ +#define INT_MASK_offDSSIM 31 /* Default Single Stepping Interruption Mask */ + +#define INT_MASK_mskH0IM ( 0x1 << INT_MASK_offH0IM ) +#define INT_MASK_mskH1IM ( 0x1 << INT_MASK_offH1IM ) +#define INT_MASK_mskH2IM ( 0x1 << INT_MASK_offH2IM ) +#define INT_MASK_mskH3IM ( 0x1 << INT_MASK_offH3IM ) +#define INT_MASK_mskH4IM ( 0x1 << INT_MASK_offH4IM ) +#define INT_MASK_mskH5IM ( 0x1 << INT_MASK_offH5IM ) +#define INT_MASK_mskSIM ( 0x1 << INT_MASK_offSIM ) +#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) +#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) + +#define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE) + +/****************************************************************************** + * ir15: INT_PEND (Interrupt Pending Register) + *****************************************************************************/ +#define INT_PEND_offH0I 0 /* Hardware Interrupt 0 pending bit */ +#define INT_PEND_offH1I 1 /* Hardware Interrupt 1 pending bit */ +#define INT_PEND_offH2I 2 /* Hardware Interrupt 2 pending bit */ +#define INT_PEND_offH3I 3 /* Hardware Interrupt 3 pending bit */ +#define INT_PEND_offH4I 4 /* Hardware Interrupt 4 pending bit */ +#define INT_PEND_offH5I 5 /* Hardware Interrupt 5 pending bit */ + +#define INT_PEND_offCIPL 0 /* Current Interrupt Priority Level */ + +/* bit 6:15 reserved */ +#define INT_PEND_offSWI 16 /* Software Interrupt pending bit */ +/* bit 17:31 reserved */ + +#define INT_PEND_mskH0I ( 0x1 << INT_PEND_offH0I ) +#define INT_PEND_mskH1I ( 0x1 << INT_PEND_offH1I ) +#define INT_PEND_mskH2I ( 0x1 << INT_PEND_offH2I ) +#define INT_PEND_mskH3I ( 0x1 << INT_PEND_offH3I ) +#define INT_PEND_mskH4I ( 0x1 << INT_PEND_offH4I ) +#define INT_PEND_mskH5I ( 0x1 << INT_PEND_offH5I ) +#define INT_PEND_mskCIPL ( 0x1 << INT_PEND_offCIPL ) +#define INT_PEND_mskSWI ( 0x1 << INT_PEND_offSWI ) + +/****************************************************************************** + * mr0: MMU_CTL (MMU Control Register) + *****************************************************************************/ +#define MMU_CTL_offD 0 /* Default minimum page size */ +#define MMU_CTL_offNTC0 1 /* Non-Translated Cacheability of partition 0 */ +#define MMU_CTL_offNTC1 3 /* Non-Translated Cacheability of partition 1 */ +#define MMU_CTL_offNTC2 5 /* Non-Translated Cacheability of partition 2 */ +#define MMU_CTL_offNTC3 7 /* Non-Translated Cacheability of partition 3 */ +#define MMU_CTL_offTBALCK 9 /* TLB all-lock resolution scheme */ +#define MMU_CTL_offMPZIU 10 /* Multiple Page Size In Use bit */ +#define MMU_CTL_offNTM0 11 /* Non-Translated VA to PA of partition 0 */ +#define MMU_CTL_offNTM1 13 /* Non-Translated VA to PA of partition 1 */ +#define MMU_CTL_offNTM2 15 /* Non-Translated VA to PA of partition 2 */ +#define MMU_CTL_offNTM3 17 /* Non-Translated VA to PA of partition 3 */ +#define MMU_CTL_offUNA 23 /* Unaligned access */ +/* bit 24:31 reserved */ + +#define MMU_CTL_mskD ( 0x1 << MMU_CTL_offD ) +#define MMU_CTL_mskNTC0 ( 0x3 << MMU_CTL_offNTC0 ) +#define MMU_CTL_mskNTC1 ( 0x3 << MMU_CTL_offNTC1 ) +#define MMU_CTL_mskNTC2 ( 0x3 << MMU_CTL_offNTC2 ) +#define MMU_CTL_mskNTC3 ( 0x3 << MMU_CTL_offNTC3 ) +#define MMU_CTL_mskTBALCK ( 0x1 << MMU_CTL_offTBALCK ) +#define MMU_CTL_mskMPZIU ( 0x1 << MMU_CTL_offMPZIU ) +#define MMU_CTL_mskNTM0 ( 0x3 << MMU_CTL_offNTM0 ) +#define MMU_CTL_mskNTM1 ( 0x3 << MMU_CTL_offNTM1 ) +#define MMU_CTL_mskNTM2 ( 0x3 << MMU_CTL_offNTM2 ) +#define MMU_CTL_mskNTM3 ( 0x3 << MMU_CTL_offNTM3 ) + +#define MMU_CTL_D4KB 0 +#define MMU_CTL_D8KB 1 +#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) + +#define MMU_CTL_CACHEABLE_NON 0 +#define MMU_CTL_CACHEABLE_WB 2 +#define MMU_CTL_CACHEABLE_WT 3 + +/****************************************************************************** + * mr1: L1_PPTB (L1 Physical Page Table Base Register) + *****************************************************************************/ +#define L1_PPTB_offNV 0 /* Enable Hardware Page Table Walker (HPTWK) */ +/* bit 1:11 reserved */ +#define L1_PPTB_offBASE 12 /* First level physical page table base address */ + +#define L1_PPTB_mskNV ( 0x1 << L1_PPTB_offNV ) +#define L1_PPTB_mskBASE ( 0xFFFFF << L1_PPTB_offBASE ) + +/****************************************************************************** + * mr2: TLB_VPN (TLB Access VPN Register) + *****************************************************************************/ +/* bit 0:11 reserved */ +#define TLB_VPN_offVPN 12 /* Virtual Page Number */ + +#define TLB_VPN_mskVPN ( 0xFFFFF << TLB_VPN_offVPN ) + +/****************************************************************************** + * mr3: TLB_DATA (TLB Access Data Register) + *****************************************************************************/ +#define TLB_DATA_offV 0 /* PTE is valid and present */ +#define TLB_DATA_offM 1 /* Page read/write access privilege */ +#define TLB_DATA_offD 4 /* Dirty bit */ +#define TLB_DATA_offX 5 /* Executable bit */ +#define TLB_DATA_offA 6 /* Access bit */ +#define TLB_DATA_offG 7 /* Global page (shared across contexts) */ +#define TLB_DATA_offC 8 /* Cacheability attribute */ +/* bit 11:11 reserved */ +#define TLB_DATA_offPPN 12 /* Physical Page Number */ + +#define TLB_DATA_mskV ( 0x1 << TLB_DATA_offV ) +#define TLB_DATA_mskM ( 0x7 << TLB_DATA_offM ) +#define TLB_DATA_mskD ( 0x1 << TLB_DATA_offD ) +#define TLB_DATA_mskX ( 0x1 << TLB_DATA_offX ) +#define TLB_DATA_mskA ( 0x1 << TLB_DATA_offA ) +#define TLB_DATA_mskG ( 0x1 << TLB_DATA_offG ) +#define TLB_DATA_mskC ( 0x7 << TLB_DATA_offC ) +#define TLB_DATA_mskPPN ( 0xFFFFF << TLB_DATA_offPPN ) + +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH +#define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|TLB_DATA_mskC) +#else +#define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|(0x6 << TLB_DATA_offC)) +#endif + +/****************************************************************************** + * mr4: TLB_MISC (TLB Access Misc Register) + *****************************************************************************/ +#define TLB_MISC_offACC_PSZ 0 /* Page size of a PTE entry */ +#define TLB_MISC_offCID 4 /* Context id */ +/* bit 13:31 reserved */ + +#define TLB_MISC_mskACC_PSZ ( 0xF << TLB_MISC_offACC_PSZ ) +#define TLB_MISC_mskCID ( 0x1FF << TLB_MISC_offCID ) + +/****************************************************************************** + * mr5: VLPT_IDX (Virtual Linear Page Table Index Register) + *****************************************************************************/ +#define VLPT_IDX_offZERO 0 /* Always 0 */ +#define VLPT_IDX_offEVPN 2 /* Exception Virtual Page Number */ +#define VLPT_IDX_offVLPTB 22 /* Base VA of VLPT */ + +#define VLPT_IDX_mskZERO ( 0x3 << VLPT_IDX_offZERO ) +#define VLPT_IDX_mskEVPN ( 0xFFFFF << VLPT_IDX_offEVPN ) +#define VLPT_IDX_mskVLPTB ( 0x3FF << VLPT_IDX_offVLPTB ) + +/****************************************************************************** + * mr6: ILMB (Instruction Local Memory Base Register) + *****************************************************************************/ +#define ILMB_offIEN 0 /* Enable ILM */ +#define ILMB_offILMSZ 1 /* Size of ILM */ +/* bit 5:19 reserved */ +#define ILMB_offIBPA 20 /* Base PA of ILM */ + +#define ILMB_mskIEN ( 0x1 << ILMB_offIEN ) +#define ILMB_mskILMSZ ( 0xF << ILMB_offILMSZ ) +#define ILMB_mskIBPA ( 0xFFF << ILMB_offIBPA ) + +/****************************************************************************** + * mr7: DLMB (Data Local Memory Base Register) + *****************************************************************************/ +#define DLMB_offDEN 0 /* Enable DLM */ +#define DLMB_offDLMSZ 1 /* Size of DLM */ +#define DLMB_offDBM 5 /* Enable Double-Buffer Mode for DLM */ +#define DLMB_offDBB 6 /* Double-buffer bank which can be accessed by the processor */ +/* bit 7:19 reserved */ +#define DLMB_offDBPA 20 /* Base PA of DLM */ + +#define DLMB_mskDEN ( 0x1 << DLMB_offDEN ) +#define DLMB_mskDLMSZ ( 0xF << DLMB_offDLMSZ ) +#define DLMB_mskDBM ( 0x1 << 
DLMB_offDBM ) +#define DLMB_mskDBB ( 0x1 << DLMB_offDBB ) +#define DLMB_mskDBPA ( 0xFFF << DLMB_offDBPA ) + +/****************************************************************************** + * mr8: CACHE_CTL (Cache Control Register) + *****************************************************************************/ +#define CACHE_CTL_offIC_EN 0 /* Enable I-cache */ +#define CACHE_CTL_offDC_EN 1 /* Enable D-cache */ +#define CACHE_CTL_offICALCK 2 /* I-cache all-lock resolution scheme */ +#define CACHE_CTL_offDCALCK 3 /* D-cache all-lock resolution scheme */ +#define CACHE_CTL_offDCCWF 4 /* Enable D-cache Critical Word Forwarding */ +#define CACHE_CTL_offDCPMW 5 /* Enable D-cache concurrent miss and write-back processing */ +/* bit 6:31 reserved */ + +#define CACHE_CTL_mskIC_EN ( 0x1 << CACHE_CTL_offIC_EN ) +#define CACHE_CTL_mskDC_EN ( 0x1 << CACHE_CTL_offDC_EN ) +#define CACHE_CTL_mskICALCK ( 0x1 << CACHE_CTL_offICALCK ) +#define CACHE_CTL_mskDCALCK ( 0x1 << CACHE_CTL_offDCALCK ) +#define CACHE_CTL_mskDCCWF ( 0x1 << CACHE_CTL_offDCCWF ) +#define CACHE_CTL_mskDCPMW ( 0x1 << CACHE_CTL_offDCPMW ) + +/****************************************************************************** + * mr9: HSMP_SADDR (High Speed Memory Port Starting Address) + *****************************************************************************/ +#define HSMP_SADDR_offEN 0 /* Enable control bit for the High Speed Memory port */ +/* bit 1:19 reserved */ + +#define HSMP_SADDR_offRANGE 1 /* Denote the address range (only defined in HSMP v2 ) */ +#define HSMP_SADDR_offSADDR 20 /* Starting base PA of the High Speed Memory Port region */ + +#define HSMP_SADDR_mskEN ( 0x1 << HSMP_SADDR_offEN ) +#define HSMP_SADDR_mskRANGE ( 0xFFF << HSMP_SADDR_offRANGE ) +#define HSMP_SADDR_mskSADDR ( 0xFFF << HSMP_SADDR_offSADDR ) + +/****************************************************************************** + * mr10: HSMP_EADDR (High Speed Memory Port Ending Address) + *****************************************************************************/ +/* bit 0:19 reserved */ +#define HSMP_EADDR_offEADDR 20 + +#define HSMP_EADDR_mskEADDR ( 0xFFF << HSMP_EADDR_offEADDR ) + +/****************************************************************************** + * dr0+(n*5): BPCn (n=0-7) (Breakpoint Control Register) + *****************************************************************************/ +#define BPC_offWP 0 /* Configuration of BPAn */ +#define BPC_offEL 1 /* Enable BPAn */ +#define BPC_offS 2 /* Data address comparison for a store instruction */ +#define BPC_offP 3 /* Compared data address is PA */ +#define BPC_offC 4 /* CID value is compared with the BPCIDn register */ +#define BPC_offBE0 5 /* Enable byte mask for the comparison with register */ +#define BPC_offBE1 6 /* Enable byte mask for the comparison with register */ +#define BPC_offBE2 7 /* Enable byte mask for the comparison with register */ +#define BPC_offBE3 8 /* Enable byte mask for the comparison with register */ +#define BPC_offT 9 /* Enable breakpoint Embedded Tracer triggering operation */ + +#define BPC_mskWP ( 0x1 << BPC_offWP ) +#define BPC_mskEL ( 0x1 << BPC_offEL ) +#define BPC_mskS ( 0x1 << BPC_offS ) +#define BPC_mskP ( 0x1 << BPC_offP ) +#define BPC_mskC ( 0x1 << BPC_offC ) +#define BPC_mskBE0 ( 0x1 << BPC_offBE0 ) +#define BPC_mskBE1 ( 0x1 << BPC_offBE1 ) +#define BPC_mskBE2 ( 0x1 << BPC_offBE2 ) +#define BPC_mskBE3 ( 0x1 << BPC_offBE3 ) +#define BPC_mskT ( 0x1 << BPC_offT ) + +/****************************************************************************** + * 
dr1+(n*5): BPAn (n=0-7) (Breakpoint Address Register) + *****************************************************************************/ + + /* These registers contain the breakpoint addresses */ + +/****************************************************************************** + * dr2+(n*5): BPAMn (n=0-7) (Breakpoint Address Mask Register) + *****************************************************************************/ + + /* These registers contain the address comparison mask for the BPAn register */ + +/****************************************************************************** + * dr3+(n*5): BPVn (n=0-7) Breakpoint Data Value Register + *****************************************************************************/ + + /* The BPVn register contains the data value that will be compared with the + * incoming load/store data value */ + +/****************************************************************************** + * dr4+(n*5): BPCIDn (n=0-7) (Breakpoint Context ID Register) + *****************************************************************************/ +#define BPCID_offCID 0 /* CID that will be compared with a process's CID */ +/* bit 9:31 reserved */ + +#define BPCID_mskCID ( 0x1FF << BPCID_offCID ) + +/****************************************************************************** + * dr40: EDM_CFG (EDM Configuration Register) + *****************************************************************************/ +#define EDM_CFG_offBC 0 /* Number of hardware breakpoint sets implemented */ +#define EDM_CFG_offDIMU 3 /* Debug Instruction Memory Unit exists */ +/* bit 4:15 reserved */ +#define EDM_CFG_offVER 16 /* EDM version */ + +#define EDM_CFG_mskBC ( 0x7 << EDM_CFG_offBC ) +#define EDM_CFG_mskDIMU ( 0x1 << EDM_CFG_offDIMU ) +#define EDM_CFG_mskVER ( 0xFFFF << EDM_CFG_offVER ) + +/****************************************************************************** + * dr41: EDMSW (EDM Status Word) + *****************************************************************************/ +#define EDMSW_offWV 0 /* Write Valid */ +#define EDMSW_offRV 1 /* Read Valid */ +#define EDMSW_offDE 2 /* Debug exception has occurred for this core */ +/* bit 3:31 reserved */ + +#define EDMSW_mskWV ( 0x1 << EDMSW_offWV ) +#define EDMSW_mskRV ( 0x1 << EDMSW_offRV ) +#define EDMSW_mskDE ( 0x1 << EDMSW_offDE ) + +/****************************************************************************** + * dr42: EDM_CTL (EDM Control Register) + *****************************************************************************/ +/* bit 0:30 reserved */ +#define EDM_CTL_offV3_EDM_MODE 6 /* EDM compatibility control bit */ +#define EDM_CTL_offDEH_SEL 31 /* Controls where debug exception is directed to */ + +#define EDM_CTL_mskV3_EDM_MODE ( 0x1 << EDM_CTL_offV3_EDM_MODE ) +#define EDM_CTL_mskDEH_SEL ( 0x1 << EDM_CTL_offDEH_SEL ) + +/****************************************************************************** + * dr43: EDM_DTR (EDM Data Transfer Register) + *****************************************************************************/ + + /* This is used to exchange data between the embedded EDM logic + * and the processor core */ + +/****************************************************************************** + * dr44: BPMTC (Breakpoint Match Trigger Counter Register) + *****************************************************************************/ +#define BPMTC_offBPMTC 0 /* Breakpoint match trigger counter value */ +/* bit 16:31 reserved */ + +#define BPMTC_mskBPMTC ( 0xFFFF << BPMTC_offBPMTC ) + 
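As with the other control words in this file, a breakpoint is armed by composing a BPCn value from the single-bit masks defined above. A hedged sketch of that composition follows; it is purely illustrative, not code from this commit: the constants mirror the BPC section of the patch, while the privileged register write itself (an mtsr to dr0+n*5) and the precise bit semantics are defined by the Andes EDM specification.

```c
/* Sketch: composing a BPCn control word from the BPC_msk* bits. */
#include <stdio.h>

#define BPC_offEL 1			/* enable BPAn */
#define BPC_offS  2			/* compare data addresses of stores */
#define BPC_mskEL (0x1UL << BPC_offEL)
#define BPC_mskS  (0x1UL << BPC_offS)

int main(void)
{
	/* An enabled breakpoint that matches store accesses; every other
	 * field is deliberately left at zero for this example. */
	unsigned long bpc = BPC_mskEL | BPC_mskS;

	printf("BPCn = 0x%08lx\n", bpc);
	return 0;
}
```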
+/****************************************************************************** + * dr45: DIMBR (Debug Instruction Memory Base Register) + *****************************************************************************/ +/* bit 0:11 reserved */ +#define DIMBR_offDIMB 12 /* Base address of the Debug Instruction Memory (DIM) */ +#define DIMBR_mskDIMB ( 0xFFFFF << DIMBR_offDIMB ) + +/****************************************************************************** + * dr46: TECR0 (Trigger Event Control Register 0) + * dr47: TECR1 (Trigger Event Control Register 1) + *****************************************************************************/ +#define TECR_offBP 0 /* Controls which BP is used as a trigger source */ +#define TECR_offNMI 8 /* Use NMI as a trigger source */ +#define TECR_offHWINT 9 /* Corresponding interrupt is used as a trigger source */ +#define TECR_offEVIC 15 /* Enable HWINT as a trigger source in EVIC mode */ +#define TECR_offSYS 16 /* Enable SYSCALL instruction as a trigger source */ +#define TECR_offDBG 17 /* Enable debug exception as a trigger source */ +#define TECR_offMRE 18 /* Enable MMU related exception as a trigger source */ +#define TECR_offE 19 /* An exception is used as a trigger source */ +/* bit 20:30 reserved */ +#define TECR_offL 31 /* Link/Cascade TECR0 trigger event to TECR1 trigger event */ + +#define TECR_mskBP ( 0xFF << TECR_offBP ) +#define TECR_mskNMI ( 0x1 << TECR_offNMI ) +#define TECR_mskHWINT ( 0x3F << TECR_offHWINT ) +#define TECR_mskEVIC ( 0x1 << TECR_offEVIC ) +#define TECR_mskSYS ( 0x1 << TECR_offSYS ) +#define TECR_mskDBG ( 0x1 << TECR_offDBG ) +#define TECR_mskMRE ( 0x1 << TECR_offMRE ) +#define TECR_mskE ( 0x1 << TECR_offE ) +#define TECR_mskL ( 0x1 << TECR_offL ) + +/****************************************************************************** + * pfr0-2: PFMC0-2 (Performance Counter Register 0-2) + *****************************************************************************/ + + /* These registers contain performance event counts */ + +/****************************************************************************** + * pfr3: PFM_CTL (Performance Counter Control Register) + *****************************************************************************/ +#define PFM_CTL_offEN0 0 /* Enable PFMC0 */ +#define PFM_CTL_offEN1 1 /* Enable PFMC1 */ +#define PFM_CTL_offEN2 2 /* Enable PFMC2 */ +#define PFM_CTL_offIE0 3 /* Enable interrupt for PFMC0 */ +#define PFM_CTL_offIE1 4 /* Enable interrupt for PFMC1 */ +#define PFM_CTL_offIE2 5 /* Enable interrupt for PFMC2 */ +#define PFM_CTL_offOVF0 6 /* Overflow bit of PFMC0 */ +#define PFM_CTL_offOVF1 7 /* Overflow bit of PFMC1 */ +#define PFM_CTL_offOVF2 8 /* Overflow bit of PFMC2 */ +#define PFM_CTL_offKS0 9 /* Enable superuser mode event counting for PFMC0 */ +#define PFM_CTL_offKS1 10 /* Enable superuser mode event counting for PFMC1 */ +#define PFM_CTL_offKS2 11 /* Enable superuser mode event counting for PFMC2 */ +#define PFM_CTL_offKU0 12 /* Enable user mode event counting for PFMC0 */ +#define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */ +#define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */ +#define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */ +#define PFM_CTL_offSEL1 16 /* The event selection for PFMC1 */ +#define PFM_CTL_offSEL2 22 /* The event selection for PFMC2 */ +/* bit 28:31 reserved */ + +#define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 ) +#define PFM_CTL_mskEN1 ( 0x01 << PFM_CTL_offEN1 ) +#define PFM_CTL_mskEN2 ( 0x01 << 
PFM_CTL_offEN2 ) +#define PFM_CTL_mskIE0 ( 0x01 << PFM_CTL_offIE0 ) +#define PFM_CTL_mskIE1 ( 0x01 << PFM_CTL_offIE1 ) +#define PFM_CTL_mskIE2 ( 0x01 << PFM_CTL_offIE2 ) +#define PFM_CTL_mskOVF0 ( 0x01 << PFM_CTL_offOVF0 ) +#define PFM_CTL_mskOVF1 ( 0x01 << PFM_CTL_offOVF1 ) +#define PFM_CTL_mskOVF2 ( 0x01 << PFM_CTL_offOVF2 ) +#define PFM_CTL_mskKS0 ( 0x01 << PFM_CTL_offKS0 ) +#define PFM_CTL_mskKS1 ( 0x01 << PFM_CTL_offKS1 ) +#define PFM_CTL_mskKS2 ( 0x01 << PFM_CTL_offKS2 ) +#define PFM_CTL_mskKU0 ( 0x01 << PFM_CTL_offKU0 ) +#define PFM_CTL_mskKU1 ( 0x01 << PFM_CTL_offKU1 ) +#define PFM_CTL_mskKU2 ( 0x01 << PFM_CTL_offKU2 ) +#define PFM_CTL_mskSEL0 ( 0x01 << PFM_CTL_offSEL0 ) +#define PFM_CTL_mskSEL1 ( 0x3F << PFM_CTL_offSEL1 ) +#define PFM_CTL_mskSEL2 ( 0x3F << PFM_CTL_offSEL2 ) + +/****************************************************************************** + * SDZ_CTL (Structure Downsizing Control Register) + *****************************************************************************/ +#define SDZ_CTL_offICDZ 0 /* I-cache downsizing control */ +#define SDZ_CTL_offDCDZ 3 /* D-cache downsizing control */ +#define SDZ_CTL_offMTBDZ 6 /* MTLB downsizing control */ +#define SDZ_CTL_offBTBDZ 9 /* Branch Target Table downsizing control */ +/* bit 12:31 reserved */ +#define SDZ_CTL_mskICDZ ( 0x07 << SDZ_CTL_offICDZ ) +#define SDZ_CTL_mskDCDZ ( 0x07 << SDZ_CTL_offDCDZ ) +#define SDZ_CTL_mskMTBDZ ( 0x07 << SDZ_CTL_offMTBDZ ) +#define SDZ_CTL_mskBTBDZ ( 0x07 << SDZ_CTL_offBTBDZ ) + +/****************************************************************************** + * N13MISC_CTL (N13 Miscellaneous Control Register) + *****************************************************************************/ +#define N13MISC_CTL_offBTB 0 /* Disable Branch Target Buffer */ +#define N13MISC_CTL_offRTP 1 /* Disable Return Target Predictor */ +#define N13MISC_CTL_offPTEPF 2 /* Disable HPTWK L2 PTE prefetch */ +#define N13MISC_CTL_offSP_SHADOW_EN 4 /* Enable shadow stack pointers */ +#define MISC_CTL_offHWPRE 11 /* Enable HardWare PREFETCH */ +/* bit 6, 9:31 reserved */ + +#define N13MISC_CTL_makBTB ( 0x1 << N13MISC_CTL_offBTB ) +#define N13MISC_CTL_makRTP ( 0x1 << N13MISC_CTL_offRTP ) +#define N13MISC_CTL_makPTEPF ( 0x1 << N13MISC_CTL_offPTEPF ) +#define N13MISC_CTL_makSP_SHADOW_EN ( 0x1 << N13MISC_CTL_offSP_SHADOW_EN ) +#define MISC_CTL_makHWPRE_EN ( 0x1 << MISC_CTL_offHWPRE ) + +#ifdef CONFIG_HW_PRE +#define MISC_init (N13MISC_CTL_makBTB|N13MISC_CTL_makRTP|N13MISC_CTL_makSP_SHADOW_EN|MISC_CTL_makHWPRE_EN) +#else +#define MISC_init (N13MISC_CTL_makBTB|N13MISC_CTL_makRTP|N13MISC_CTL_makSP_SHADOW_EN) +#endif + +/****************************************************************************** + * PRUSR_ACC_CTL (Privileged Resource User Access Control Registers) + *****************************************************************************/ +#define PRUSR_ACC_CTL_offDMA_EN 0 /* Allow user mode access of DMA registers */ +#define PRUSR_ACC_CTL_offPFM_EN 1 /* Allow user mode access of PFM registers */ + +#define PRUSR_ACC_CTL_mskDMA_EN ( 0x1 << PRUSR_ACC_CTL_offDMA_EN ) +#define PRUSR_ACC_CTL_mskPFM_EN ( 0x1 << PRUSR_ACC_CTL_offPFM_EN ) + +/****************************************************************************** + * dmar0: DMA_CFG (DMA Configuration Register) + *****************************************************************************/ +#define DMA_CFG_offNCHN 0 /* The number of DMA channels implemented */ +#define DMA_CFG_offUNEA 2 /* Un-aligned External Address transfer feature */ +#define DMA_CFG_off2DET 3 
/* 2-D Element Transfer feature */ +/* bit 4:15 reserved */ +#define DMA_CFG_offVER 16 /* DMA architecture and implementation version */ + +#define DMA_CFG_mskNCHN ( 0x3 << DMA_CFG_offNCHN ) +#define DMA_CFG_mskUNEA ( 0x1 << DMA_CFG_offUNEA ) +#define DMA_CFG_msk2DET ( 0x1 << DMA_CFG_off2DET ) +#define DMA_CFG_mskVER ( 0xFFFF << DMA_CFG_offVER ) + +/****************************************************************************** + * dmar1: DMA_GCSW (DMA Global Control and Status Word Register) + *****************************************************************************/ +#define DMA_GCSW_offC0STAT 0 /* DMA channel 0 state */ +#define DMA_GCSW_offC1STAT 3 /* DMA channel 1 state */ +/* bit 6:11 reserved */ +#define DMA_GCSW_offC0INT 12 /* DMA channel 0 generate interrupt */ +#define DMA_GCSW_offC1INT 13 /* DMA channel 1 generate interrupt */ +/* bit 14:30 reserved */ +#define DMA_GCSW_offEN 31 /* Enable DMA engine */ + +#define DMA_GCSW_mskC0STAT ( 0x7 << DMA_GCSW_offC0STAT ) +#define DMA_GCSW_mskC1STAT ( 0x7 << DMA_GCSW_offC1STAT ) +#define DMA_GCSW_mskC0INT ( 0x1 << DMA_GCSW_offC0INT ) +#define DMA_GCSW_mskC1INT ( 0x1 << DMA_GCSW_offC1INT ) +#define DMA_GCSW_mskEN ( 0x1 << DMA_GCSW_offEN ) + +/****************************************************************************** + * dmar2: DMA_CHNSEL (DMA Channel Selection Register) + *****************************************************************************/ +#define DMA_CHNSEL_offCHAN 0 /* Selected channel number */ +/* bit 2:31 reserved */ + +#define DMA_CHNSEL_mskCHAN ( 0x3 << DMA_CHNSEL_offCHAN ) + +/****************************************************************************** + * dmar3: DMA_ACT (DMA Action Register) + *****************************************************************************/ +#define DMA_ACT_offACMD 0 /* DMA Action Command */ +/* bit 2:31 reserved */ +#define DMA_ACT_mskACMD ( 0x3 << DMA_ACT_offACMD ) + +/****************************************************************************** + * dmar4: DMA_SETUP (DMA Setup Register) + *****************************************************************************/ +#define DMA_SETUP_offLM 0 /* Local Memory Selection */ +#define DMA_SETUP_offTDIR 1 /* Transfer Direction */ +#define DMA_SETUP_offTES 2 /* Transfer Element Size */ +#define DMA_SETUP_offESTR 4 /* External memory transfer Stride */ +#define DMA_SETUP_offCIE 16 /* Interrupt Enable on Completion */ +#define DMA_SETUP_offSIE 17 /* Interrupt Enable on explicit Stop */ +#define DMA_SETUP_offEIE 18 /* Interrupt Enable on Error */ +#define DMA_SETUP_offUE 19 /* Enable the Un-aligned External Address */ +#define DMA_SETUP_off2DE 20 /* Enable the 2-D External Transfer */ +#define DMA_SETUP_offCOA 21 /* Transfer Coalescable */ +/* bit 22:31 reserved */ + +#define DMA_SETUP_mskLM ( 0x1 << DMA_SETUP_offLM ) +#define DMA_SETUP_mskTDIR ( 0x1 << DMA_SETUP_offTDIR ) +#define DMA_SETUP_mskTES ( 0x3 << DMA_SETUP_offTES ) +#define DMA_SETUP_mskESTR ( 0xFFF << DMA_SETUP_offESTR ) +#define DMA_SETUP_mskCIE ( 0x1 << DMA_SETUP_offCIE ) +#define DMA_SETUP_mskSIE ( 0x1 << DMA_SETUP_offSIE ) +#define DMA_SETUP_mskEIE ( 0x1 << DMA_SETUP_offEIE ) +#define DMA_SETUP_mskUE ( 0x1 << DMA_SETUP_offUE ) +#define DMA_SETUP_msk2DE ( 0x1 << DMA_SETUP_off2DE ) +#define DMA_SETUP_mskCOA ( 0x1 << DMA_SETUP_offCOA ) + +/****************************************************************************** + * dmar5: DMA_ISADDR (DMA Internal Start Address Register) + *****************************************************************************/ +#define 
DMA_ISADDR_offISADDR 0 /* Internal Start Address */ +/* bit 20:31 reserved */ +#define DMA_ISADDR_mskISADDR ( 0xFFFFF << DMA_ISADDR_offISADDR ) + +/****************************************************************************** + * dmar6: DMA_ESADDR (DMA External Start Address Register) + *****************************************************************************/ +/* This register holds the External Start Address */ + +/****************************************************************************** + * dmar7: DMA_TCNT (DMA Transfer Element Count Register) + *****************************************************************************/ +#define DMA_TCNT_offTCNT 0 /* DMA transfer element count */ +/* bit 18:31 reserved */ +#define DMA_TCNT_mskTCNT ( 0x3FFFF << DMA_TCNT_offTCNT ) + +/****************************************************************************** + * dmar8: DMA_STATUS (DMA Status Register) + *****************************************************************************/ +#define DMA_STATUS_offSTAT 0 /* DMA channel state */ +#define DMA_STATUS_offSTUNA 3 /* Un-aligned error on External Stride value */ +#define DMA_STATUS_offDERR 4 /* DMA Transfer Disruption Error */ +#define DMA_STATUS_offEUNA 5 /* Un-aligned error on the External address */ +#define DMA_STATUS_offIUNA 6 /* Un-aligned error on the Internal address */ +#define DMA_STATUS_offIOOR 7 /* Out-Of-Range error on the Internal address */ +#define DMA_STATUS_offEBUS 8 /* Bus Error on an External DMA transfer */ +#define DMA_STATUS_offESUP 9 /* DMA setup error */ +/* bit 10:31 reserved */ + +#define DMA_STATUS_mskSTAT ( 0x7 << DMA_STATUS_offSTAT ) +#define DMA_STATUS_mskSTUNA ( 0x1 << DMA_STATUS_offSTUNA ) +#define DMA_STATUS_mskDERR ( 0x1 << DMA_STATUS_offDERR ) +#define DMA_STATUS_mskEUNA ( 0x1 << DMA_STATUS_offEUNA ) +#define DMA_STATUS_mskIUNA ( 0x1 << DMA_STATUS_offIUNA ) +#define DMA_STATUS_mskIOOR ( 0x1 << DMA_STATUS_offIOOR ) +#define DMA_STATUS_mskEBUS ( 0x1 << DMA_STATUS_offEBUS ) +#define DMA_STATUS_mskESUP ( 0x1 << DMA_STATUS_offESUP ) + +/****************************************************************************** + * dmar9: DMA_2DSET (DMA 2D Setup Register) + *****************************************************************************/ +#define DMA_2DSET_offWECNT 0 /* The Width Element Count for a 2-D region */ +#define DMA_2DSET_offHTSTR 16 /* The Height Stride for a 2-D region */ + +#define DMA_2DSET_mskHTSTR ( 0xFFFF << DMA_2DSET_offHTSTR ) +#define DMA_2DSET_mskWECNT ( 0xFFFF << DMA_2DSET_offWECNT ) + +/****************************************************************************** + * dmar10: DMA_2DSCTL (DMA 2D Startup Control Register) + *****************************************************************************/ +#define DMA_2DSCTL_offSTWECNT 0 /* Startup Width Element Count for a 2-D region */ +/* bit 16:31 reserved */ + +#define DMA_2DSCTL_mskSTWECNT ( 0xFFFF << DMA_2DSCTL_offSTWECNT ) + +/****************************************************************************** + * fpcsr: FPCSR (Floating-Point Control Status Register) + *****************************************************************************/ +#define FPCSR_offRM 0 +#define FPCSR_offIVO 2 +#define FPCSR_offDBZ 3 +#define FPCSR_offOVF 4 +#define FPCSR_offUDF 5 +#define FPCSR_offIEX 6 +#define FPCSR_offIVOE 7 +#define FPCSR_offDBZE 8 +#define FPCSR_offOVFE 9 +#define FPCSR_offUDFE 10 +#define FPCSR_offIEXE 11 +#define FPCSR_offDNZ 12 +#define FPCSR_offIVOT 13 +#define FPCSR_offDBZT 14 +#define FPCSR_offOVFT 15 +#define 
FPCSR_offUDFT 16 +#define FPCSR_offIEXT 17 +#define FPCSR_offDNIT 18 +#define FPCSR_offRIT 19 + +#define FPCSR_mskRM ( 0x3 << FPCSR_offRM ) +#define FPCSR_mskIVO ( 0x1 << FPCSR_offIVO ) +#define FPCSR_mskDBZ ( 0x1 << FPCSR_offDBZ ) +#define FPCSR_mskOVF ( 0x1 << FPCSR_offOVF ) +#define FPCSR_mskUDF ( 0x1 << FPCSR_offUDF ) +#define FPCSR_mskIEX ( 0x1 << FPCSR_offIEX ) +#define FPCSR_mskIVOE ( 0x1 << FPCSR_offIVOE ) +#define FPCSR_mskDBZE ( 0x1 << FPCSR_offDBZE ) +#define FPCSR_mskOVFE ( 0x1 << FPCSR_offOVFE ) +#define FPCSR_mskUDFE ( 0x1 << FPCSR_offUDFE ) +#define FPCSR_mskIEXE ( 0x1 << FPCSR_offIEXE ) +#define FPCSR_mskDNZ ( 0x1 << FPCSR_offDNZ ) +#define FPCSR_mskIVOT ( 0x1 << FPCSR_offIVOT ) +#define FPCSR_mskDBZT ( 0x1 << FPCSR_offDBZT ) +#define FPCSR_mskOVFT ( 0x1 << FPCSR_offOVFT ) +#define FPCSR_mskUDFT ( 0x1 << FPCSR_offUDFT ) +#define FPCSR_mskIEXT ( 0x1 << FPCSR_offIEXT ) +#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT ) +#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT ) +#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX) +#define FPCSR_mskALLE_NO_UDF_IEXE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE) +#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE) +#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT) + +/****************************************************************************** + * fpcfg: FPCFG (Floating-Point Configuration Register) + *****************************************************************************/ +#define FPCFG_offSP 0 +#define FPCFG_offDP 1 +#define FPCFG_offFREG 2 +#define FPCFG_offFMA 4 +#define FPCFG_offIMVER 22 +#define FPCFG_offAVER 27 + +#define FPCFG_mskSP ( 0x1 << FPCFG_offSP ) +#define FPCFG_mskDP ( 0x1 << FPCFG_offDP ) +#define FPCFG_mskFREG ( 0x3 << FPCFG_offFREG ) +#define FPCFG_mskFMA ( 0x1 << FPCFG_offFMA ) +#define FPCFG_mskIMVER ( 0x1F << FPCFG_offIMVER ) +#define FPCFG_mskAVER ( 0x1F << FPCFG_offAVER ) + +/* 8 Single precision or 4 double precision registers are available */ +#define SP8_DP4_reg 0 +/* 16 Single precision or 8 double precision registers are available */ +#define SP16_DP8_reg 1 +/* 32 Single precision or 16 double precision registers are available */ +#define SP32_DP16_reg 2 +/* 32 Single precision or 32 double precision registers are available */ +#define SP32_DP32_reg 3 + +/****************************************************************************** + * fucpr: FUCOP_CTL (FPU and Coprocessor Enable Control Register) + *****************************************************************************/ +#define FUCOP_CTL_offCP0EN 0 +#define FUCOP_CTL_offCP1EN 1 +#define FUCOP_CTL_offCP2EN 2 +#define FUCOP_CTL_offCP3EN 3 +#define FUCOP_CTL_offAUEN 31 + +#define FUCOP_CTL_mskCP0EN ( 0x1 << FUCOP_CTL_offCP0EN ) +#define FUCOP_CTL_mskCP1EN ( 0x1 << FUCOP_CTL_offCP1EN ) +#define FUCOP_CTL_mskCP2EN ( 0x1 << FUCOP_CTL_offCP2EN ) +#define FUCOP_CTL_mskCP3EN ( 0x1 << FUCOP_CTL_offCP3EN ) +#define FUCOP_CTL_mskAUEN ( 0x1 << FUCOP_CTL_offAUEN ) + +#endif /* __NDS32_BITFIELD_H__ */ diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h new file mode 100644 index 000000000..fc3c41b59 --- /dev/null +++ b/arch/nds32/include/asm/cache.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_CACHE_H__ +#define __NDS32_CACHE_H__ + +#define L1_CACHE_BYTES 32 +#define 
L1_CACHE_SHIFT 5 + +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#endif /* __NDS32_CACHE_H__ */ diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h new file mode 100644 index 000000000..e89d8078f --- /dev/null +++ b/arch/nds32/include/asm/cache_info.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +struct cache_info { + unsigned char ways; + unsigned char line_size; + unsigned short sets; + unsigned short size; +#if defined(CONFIG_CPU_CACHE_ALIASING) + unsigned short aliasing_num; + unsigned int aliasing_mask; +#endif +}; diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h new file mode 100644 index 000000000..7d6824f7c --- /dev/null +++ b/arch/nds32/include/asm/cacheflush.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_CACHEFLUSH_H__ +#define __NDS32_CACHEFLUSH_H__ + +#include <linux/mm.h> + +#define PG_dcache_dirty PG_arch_1 + +void flush_icache_range(unsigned long start, unsigned long end); +#define flush_icache_range flush_icache_range + +void flush_icache_page(struct vm_area_struct *vma, struct page *page); +#define flush_icache_page flush_icache_page + +#ifdef CONFIG_CPU_CACHE_ALIASING +void flush_cache_mm(struct mm_struct *mm); +void flush_cache_dup_mm(struct mm_struct *mm); +void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void flush_cache_page(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn); +void flush_cache_kmaps(void); +void flush_cache_vmap(unsigned long start, unsigned long end); +void flush_cache_vunmap(unsigned long start, unsigned long end); + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +void flush_dcache_page(struct page *page); +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, void *src, int len); +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, void *src, int len); + +#define ARCH_HAS_FLUSH_ANON_PAGE +void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr); + +#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +void flush_kernel_dcache_page(struct page *page); +void flush_kernel_vmap_range(void *addr, int size); +void invalidate_kernel_vmap_range(void *addr, int size); +#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) +#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) + +#else +void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len); +#define flush_icache_user_page flush_icache_user_page + +#include <asm-generic/cacheflush.h> +#endif + +#endif /* __NDS32_CACHEFLUSH_H__ */ diff --git a/arch/nds32/include/asm/current.h b/arch/nds32/include/asm/current.h new file mode 100644 index 000000000..65d300961 --- /dev/null +++ b/arch/nds32/include/asm/current.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASM_NDS32_CURRENT_H +#define _ASM_NDS32_CURRENT_H + +#ifndef __ASSEMBLY__ +register struct task_struct *current asm("$r25"); +#endif /* __ASSEMBLY__ */ +#define tsk $r25 + +#endif /* _ASM_NDS32_CURRENT_H */ diff --git a/arch/nds32/include/asm/delay.h b/arch/nds32/include/asm/delay.h new file mode 100644 index 000000000..56ea3894f --- /dev/null +++ 
b/arch/nds32/include/asm/delay.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_DELAY_H__ +#define __NDS32_DELAY_H__ + +#include <asm/param.h> + +/* There is no clocksource cycle counter in the CPU. */ +static inline void __delay(unsigned long loops) +{ + __asm__ __volatile__(".align 2\n" + "1:\n" + "\taddi\t%0, %0, -1\n" + "\tbgtz\t%0, 1b\n" + :"=r"(loops) + :"0"(loops)); +} + +static inline void __udelay(unsigned long usecs, unsigned long lpj) +{ + usecs *= (unsigned long)(((0x8000000000000000ULL / (500000 / HZ)) + + 0x80000000ULL) >> 32); + usecs = (unsigned long)(((unsigned long long)usecs * lpj) >> 32); + __delay(usecs); +} + +#define udelay(usecs) __udelay((usecs), loops_per_jiffy) + +/* make sure "usecs *= ..." in udelay do not overflow. */ +#if HZ >= 1000 +#define MAX_UDELAY_MS 1 +#elif HZ <= 200 +#define MAX_UDELAY_MS 5 +#else +#define MAX_UDELAY_MS (1000 / HZ) +#endif + +#endif diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h new file mode 100644 index 000000000..1c8e56d70 --- /dev/null +++ b/arch/nds32/include/asm/elf.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASMNDS32_ELF_H +#define __ASMNDS32_ELF_H + +/* + * ELF register definitions.. + */ + +#include <asm/ptrace.h> +#include <asm/fpu.h> +#include <linux/elf-em.h> + +typedef unsigned long elf_greg_t; +typedef unsigned long elf_freg_t[3]; + +extern unsigned int elf_hwcap; + +#define R_NDS32_NONE 0 +#define R_NDS32_16_RELA 19 +#define R_NDS32_32_RELA 20 +#define R_NDS32_9_PCREL_RELA 22 +#define R_NDS32_15_PCREL_RELA 23 +#define R_NDS32_17_PCREL_RELA 24 +#define R_NDS32_25_PCREL_RELA 25 +#define R_NDS32_HI20_RELA 26 +#define R_NDS32_LO12S3_RELA 27 +#define R_NDS32_LO12S2_RELA 28 +#define R_NDS32_LO12S1_RELA 29 +#define R_NDS32_LO12S0_RELA 30 +#define R_NDS32_SDA15S3_RELA 31 +#define R_NDS32_SDA15S2_RELA 32 +#define R_NDS32_SDA15S1_RELA 33 +#define R_NDS32_SDA15S0_RELA 34 +#define R_NDS32_GOT20 37 +#define R_NDS32_25_PLTREL 38 +#define R_NDS32_COPY 39 +#define R_NDS32_GLOB_DAT 40 +#define R_NDS32_JMP_SLOT 41 +#define R_NDS32_RELATIVE 42 +#define R_NDS32_GOTOFF 43 +#define R_NDS32_GOTPC20 44 +#define R_NDS32_GOT_HI20 45 +#define R_NDS32_GOT_LO12 46 +#define R_NDS32_GOTPC_HI20 47 +#define R_NDS32_GOTPC_LO12 48 +#define R_NDS32_GOTOFF_HI20 49 +#define R_NDS32_GOTOFF_LO12 50 +#define R_NDS32_INSN16 51 +#define R_NDS32_LABEL 52 +#define R_NDS32_LONGCALL1 53 +#define R_NDS32_LONGCALL2 54 +#define R_NDS32_LONGCALL3 55 +#define R_NDS32_LONGJUMP1 56 +#define R_NDS32_LONGJUMP2 57 +#define R_NDS32_LONGJUMP3 58 +#define R_NDS32_LOADSTORE 59 +#define R_NDS32_9_FIXED_RELA 60 +#define R_NDS32_15_FIXED_RELA 61 +#define R_NDS32_17_FIXED_RELA 62 +#define R_NDS32_25_FIXED_RELA 63 +#define R_NDS32_PLTREL_HI20 64 +#define R_NDS32_PLTREL_LO12 65 +#define R_NDS32_PLT_GOTREL_HI20 66 +#define R_NDS32_PLT_GOTREL_LO12 67 +#define R_NDS32_LO12S0_ORI_RELA 72 +#define R_NDS32_DWARF2_OP1_RELA 77 +#define R_NDS32_DWARF2_OP2_RELA 78 +#define R_NDS32_DWARF2_LEB_RELA 79 +#define R_NDS32_WORD_9_PCREL_RELA 94 +#define R_NDS32_LONGCALL4 107 +#define R_NDS32_RELA_NOP_MIX 192 +#define R_NDS32_RELA_NOP_MAX 255 + +#define ELF_NGREG (sizeof (struct user_pt_regs) / sizeof(elf_greg_t)) +#define ELF_CORE_COPY_REGS(dest, regs) \ + *(struct user_pt_regs *)&(dest) = (regs)->user_regs; + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Core file format: The core file is written in 
such a way that gdb + can understand it and provide useful information to the user (under + linux we use the 'trad-core' bfd). There are quite a number of + obstacles to being able to view the contents of the floating point + registers, and until these are solved you will not be able to view the + contents of them. Actually, you can read in the core file and look at + the contents of the user struct to find out what the floating point + registers contain. + The actual file contents are as follows: + UPAGE: 1 page consisting of a user struct that tells gdb what is present + in the file. Directly after this is a copy of the task_struct, which + is currently not used by gdb, but it may come in useful at some point. + All of the registers are stored as part of the upage. The upage should + always be only one page. + DATA: The data area is stored. We use current->end_text to + current->brk to pick up all of the user variables, plus any memory + that may have been malloced. No attempt is made to determine if a page + is demand-zero or if a page is totally unused, we just cover the entire + range. All of the addresses are rounded in such a way that an integral + number of pages is written. + STACK: We need the stack information in order to get a meaningful + backtrace. We need to write the data from (esp) to + current->start_stack, so we round each of these off in order to be able + to write an integer number of pages. + The minimum core file size is 3 pages, or 12288 bytes. +*/ + +struct user_fp { + unsigned long long fd_regs[32]; + unsigned long fpcsr; +}; + +typedef struct user_fp elf_fpregset_t; + +struct elf32_hdr; +#define elf_check_arch(x) ((x)->e_machine == EM_NDS32) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#ifdef __NDS32_EB__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif +#define ELF_ARCH EM_NDS32 +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we + have no such handler. */ +#define ELF_PLAT_INIT(_r, load_addr) (_r)->uregs[0] = 0 + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. */ + +#define ELF_HWCAP (elf_hwcap) + +#ifdef __KERNEL__ + +#define ELF_PLATFORM (NULL) + +/* Old NetWinder binaries were compiled in such a way that the iBCS + heuristic always trips on them. Until these binaries become uncommon + enough not to care, don't trust the `ibcs' flag here. In any case + there is no other ELF system currently supported by iBCS. + @@ Could print a warning message to encourage users to upgrade. 
*/ +#define SET_PERSONALITY(ex) set_personality(PER_LINUX) + +#endif + + +#if IS_ENABLED(CONFIG_FPU) +#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPCSR_INIT) +#else +#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0) +#endif + +#define ARCH_DLINFO \ +do { \ + /* Optional FPU initialization */ \ + FPU_AUX_ENT; \ + \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)current->mm->context.vdso); \ +} while (0) +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +int arch_setup_additional_pages(struct linux_binprm *, int); + +#endif diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h new file mode 100644 index 000000000..5a4bf11e5 --- /dev/null +++ b/arch/nds32/include/asm/fixmap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_FIXMAP_H +#define __ASM_NDS32_FIXMAP_H + +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +enum fixed_addresses { + FIX_HOLE, + FIX_KMAP_RESERVED, + FIX_KMAP_BEGIN, +#ifdef CONFIG_HIGHMEM + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS), +#endif + FIX_EARLYCON_MEM_BASE, + __end_of_fixed_addresses +}; +#define FIXADDR_TOP ((unsigned long) (-(16 * PAGE_SIZE))) +#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO __pgprot(PAGE_DEVICE) +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); + +#include <asm-generic/fixmap.h> +#endif /* __ASM_NDS32_FIXMAP_H */ diff --git a/arch/nds32/include/asm/fpu.h b/arch/nds32/include/asm/fpu.h new file mode 100644 index 000000000..8294ed4aa --- /dev/null +++ b/arch/nds32/include/asm/fpu.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2005-2018 Andes Technology Corporation */ + +#ifndef __ASM_NDS32_FPU_H +#define __ASM_NDS32_FPU_H + +#if IS_ENABLED(CONFIG_FPU) +#ifndef __ASSEMBLY__ +#include <linux/sched/task_stack.h> +#include <linux/preempt.h> +#include <asm/ptrace.h> + +extern bool has_fpu; + +extern void save_fpu(struct task_struct *__tsk); +extern void load_fpu(const struct fpu_struct *fpregs); +extern bool do_fpu_exception(unsigned int subtype, struct pt_regs *regs); +extern int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu); + +#define test_tsk_fpu(regs) (regs->fucop_ctl & FUCOP_CTL_mskCP0EN) + +/* + * Initially load the FPU with signalling NANS. This bit pattern + * has the property that no matter whether considered as single or as + * double precision, it still represents a signalling NAN. + */ + +#define sNAN64 0xFFFFFFFFFFFFFFFFULL +#define sNAN32 0xFFFFFFFFUL + +#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) +/* + * Denormalized numbers are unsupported by the nds32 FPU, so an operation + * is treated as an underflow case when its final result is a denormalized + * number. To enhance precision, the underflow exception trap should be + * enabled by default; the kernel then re-executes the instruction in the + * fpu emulator when it gets an underflow exception. 
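+ * + * A minimal illustrative sketch (editorial, not part of the original header; it assumes struct fpu_struct exposes an fpcsr word holding this control value): + * + * struct fpu_struct *fpu = &current->thread.fpu; + * if (fpu->fpcsr & FPCSR_mskUDFE) + * ret = do_fpuemu(regs, fpu); (re-run the instruction in the emulator) + *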
+ */ +#define FPCSR_INIT (FPCSR_mskUDFE | FPCSR_mskIEXE) +#else +#define FPCSR_INIT 0x0UL +#endif + +extern const struct fpu_struct init_fpuregs; + +static inline void disable_ptreg_fpu(struct pt_regs *regs) +{ + regs->fucop_ctl &= ~FUCOP_CTL_mskCP0EN; +} + +static inline void enable_ptreg_fpu(struct pt_regs *regs) +{ + regs->fucop_ctl |= FUCOP_CTL_mskCP0EN; +} + +static inline void enable_fpu(void) +{ + unsigned long fucop_ctl; + + fucop_ctl = __nds32__mfsr(NDS32_SR_FUCOP_CTL) | FUCOP_CTL_mskCP0EN; + __nds32__mtsr(fucop_ctl, NDS32_SR_FUCOP_CTL); + __nds32__isb(); +} + +static inline void disable_fpu(void) +{ + unsigned long fucop_ctl; + + fucop_ctl = __nds32__mfsr(NDS32_SR_FUCOP_CTL) & ~FUCOP_CTL_mskCP0EN; + __nds32__mtsr(fucop_ctl, NDS32_SR_FUCOP_CTL); + __nds32__isb(); +} + +static inline void lose_fpu(void) +{ + preempt_disable(); +#if IS_ENABLED(CONFIG_LAZY_FPU) + if (last_task_used_math == current) { + last_task_used_math = NULL; +#else + if (test_tsk_fpu(task_pt_regs(current))) { +#endif + save_fpu(current); + } + disable_ptreg_fpu(task_pt_regs(current)); + preempt_enable(); +} + +static inline void own_fpu(void) +{ + preempt_disable(); +#if IS_ENABLED(CONFIG_LAZY_FPU) + if (last_task_used_math != current) { + if (last_task_used_math != NULL) + save_fpu(last_task_used_math); + load_fpu(&current->thread.fpu); + last_task_used_math = current; + } +#else + if (!test_tsk_fpu(task_pt_regs(current))) { + load_fpu(&current->thread.fpu); + } +#endif + enable_ptreg_fpu(task_pt_regs(current)); + preempt_enable(); +} + +#if !IS_ENABLED(CONFIG_LAZY_FPU) +static inline void unlazy_fpu(struct task_struct *tsk) +{ + preempt_disable(); + if (test_tsk_fpu(task_pt_regs(tsk))) + save_fpu(tsk); + preempt_enable(); +} +#endif /* !CONFIG_LAZY_FPU */ +static inline void clear_fpu(struct pt_regs *regs) +{ + preempt_disable(); + if (test_tsk_fpu(regs)) + disable_ptreg_fpu(regs); + preempt_enable(); +} +#endif /* CONFIG_FPU */ +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_NDS32_FPU_H */ diff --git a/arch/nds32/include/asm/fpuemu.h b/arch/nds32/include/asm/fpuemu.h new file mode 100644 index 000000000..63e7ef5f7 --- /dev/null +++ b/arch/nds32/include/asm/fpuemu.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2005-2018 Andes Technology Corporation */ + +#ifndef __ARCH_NDS32_FPUEMU_H +#define __ARCH_NDS32_FPUEMU_H + +/* + * single precision + */ + +void fadds(void *ft, void *fa, void *fb); +void fsubs(void *ft, void *fa, void *fb); +void fmuls(void *ft, void *fa, void *fb); +void fdivs(void *ft, void *fa, void *fb); +void fs2d(void *ft, void *fa); +void fs2si(void *ft, void *fa); +void fs2si_z(void *ft, void *fa); +void fs2ui(void *ft, void *fa); +void fs2ui_z(void *ft, void *fa); +void fsi2s(void *ft, void *fa); +void fui2s(void *ft, void *fa); +void fsqrts(void *ft, void *fa); +void fnegs(void *ft, void *fa); +int fcmps(void *ft, void *fa, void *fb, int cop); + +/* + * double precision + */ +void faddd(void *ft, void *fa, void *fb); +void fsubd(void *ft, void *fa, void *fb); +void fmuld(void *ft, void *fa, void *fb); +void fdivd(void *ft, void *fa, void *fb); +void fsqrtd(void *ft, void *fa); +void fd2s(void *ft, void *fa); +void fd2si(void *ft, void *fa); +void fd2si_z(void *ft, void *fa); +void fd2ui(void *ft, void *fa); +void fd2ui_z(void *ft, void *fa); +void fsi2d(void *ft, void *fa); +void fui2d(void *ft, void *fa); +void fnegd(void *ft, void *fa); +int fcmpd(void *ft, void *fa, void *fb, int cop); + +#endif /* __ARCH_NDS32_FPUEMU_H */ diff --git a/arch/nds32/include/asm/ftrace.h 
b/arch/nds32/include/asm/ftrace.h new file mode 100644 index 000000000..2f96cc96a --- /dev/null +++ b/arch/nds32/include/asm/ftrace.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_NDS32_FTRACE_H +#define __ASM_NDS32_FTRACE_H + +#ifdef CONFIG_FUNCTION_TRACER + +#define HAVE_FUNCTION_GRAPH_FP_TEST + +#define MCOUNT_ADDR ((unsigned long)(_mcount)) +/* mcount call is composed of three instructions: + * sethi + ori + jral + */ +#define MCOUNT_INSN_SIZE 12 + +extern void _mcount(unsigned long parent_ip); + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define FTRACE_ADDR ((unsigned long)_ftrace_caller) + +#ifdef __NDS32_EL__ +#define INSN_NOP 0x09000040 +#define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2) +#define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046) +#define ENDIAN_CONVERT(insn) be32_to_cpu(insn) +#else /* __NDS32_EB__ */ +#define INSN_NOP 0x40000009 +#define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2) +#define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000) +#define ENDIAN_CONVERT(insn) (insn) +#endif + +extern void _ftrace_caller(unsigned long parent_ip); +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} +struct dyn_arch_ftrace { +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* __ASM_NDS32_FTRACE_H */ diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h new file mode 100644 index 000000000..4223f473b --- /dev/null +++ b/arch/nds32/include/asm/futex.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_FUTEX_H__ +#define __NDS32_FUTEX_H__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + +#define __futex_atomic_ex_table(err_reg) \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4f\n" \ + " .long 2b, 4f\n" \ + " .popsection\n" \ + " .pushsection .fixup,\"ax\"\n" \ + "4: move %0, " err_reg "\n" \ + " b 3b\n" \ + " .popsection" + +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ + smp_mb(); \ + asm volatile( \ + " movi $ta, #0\n" \ + "1: llw %1, [%2+$ta]\n" \ + " " insn "\n" \ + "2: scw %0, [%2+$ta]\n" \ + " beqz %0, 1b\n" \ + " movi %0, #0\n" \ + "3:\n" \ + __futex_atomic_ex_table("%4") \ + : "=&r" (ret), "=&r" (oldval) \ + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ + : "cc", "memory") +static inline int +futex_atomic_cmpxchg_inatomic(u32 * uval, u32 __user * uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val, tmp, flags; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + smp_mb(); + asm volatile (" movi $ta, #0\n" + "1: llw %1, [%6 + $ta]\n" + " sub %3, %1, %4\n" + " cmovz %2, %5, %3\n" + " cmovn %2, %1, %3\n" + "2: scw %2, [%6 + $ta]\n" + " beqz %2, 1b\n" + "3:\n " __futex_atomic_ex_table("%7") + :"+&r"(ret), "=&r"(val), "=&r"(tmp), "=&r"(flags) + :"r"(oldval), "r"(newval), "r"(uaddr), "i"(-EFAULT) + :"$ta", "memory"); + smp_mb(); + + *uval = val; + return ret; +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("move %0, %3", ret, oldval, tmp, uaddr, + oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %0, %1, %3", ret, oldval, tmp, uaddr, + oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %0, %1, %3", ret, oldval, tmp, uaddr, + oparg); + break; + case 
FUTEX_OP_ANDN: + __futex_atomic_op("and %0, %1, %3", ret, oldval, tmp, uaddr, + ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %0, %1, %3", ret, oldval, tmp, uaddr, + oparg); + break; + default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} +#endif /* __NDS32_FUTEX_H__ */ diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h new file mode 100644 index 000000000..fe986d0e6 --- /dev/null +++ b/arch/nds32/include/asm/highmem.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#include <asm/proc-fns.h> +#include <asm/kmap_types.h> +#include <asm/fixmap.h> + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +/* + * Ordering is (from lower to higher memory addresses): + * + * high_memory + * Persistent kmap area + * PKMAP_BASE + * fixed_addresses + * FIXADDR_START + * FIXADDR_TOP + * Vmalloc area + * VMALLOC_START + * VMALLOC_END + */ +#define PKMAP_BASE ((FIXADDR_START - PGDIR_SIZE) & (PGDIR_MASK)) +#define LAST_PKMAP PTRS_PER_PTE +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) (((virt) - (PKMAP_BASE)) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +static inline void flush_cache_kmaps(void) +{ + cpu_dcache_wbinval_all(); +} + +/* declarations for highmem.c */ +extern unsigned long highstart_pfn, highend_pfn; + +extern pte_t *pkmap_page_table; + +extern void kmap_init(void); + +/* + * The following functions are already defined by <linux/highmem.h> + * when CONFIG_HIGHMEM is not set. + */ +#ifdef CONFIG_HIGHMEM +extern void *kmap_atomic_pfn(unsigned long pfn); +#endif + +#endif diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h new file mode 100644 index 000000000..e57378d04 --- /dev/null +++ b/arch/nds32/include/asm/io.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_IO_H +#define __ASM_NDS32_IO_H + +#include <linux/types.h> + +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("sbi %0, [%1]" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("shi %0, [%1]" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("swi %0, [%1]" : : "r" (val), "r" (addr)); +} + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("lbi %0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("lhi %0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("lwi %0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +#define __iormb() rmb() +#define __iowmb() wmb() + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. 
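+ * + * Illustrative usage (an editorial sketch, not from the original patch; fifo_base and the 0x10 doorbell offset are invented device details): + * + * for (i = 0; i < n; i++) + * writel_relaxed(buf[i], fifo_base); + * writel(1, fifo_base + 0x10); (the ordered form; __iowmb() runs first) + *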
+ */ + +#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) +#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) + +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) + +#include <asm-generic/io.h> + +#endif /* __ASM_NDS32_IO_H */ diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h new file mode 100644 index 000000000..51ef800bb --- /dev/null +++ b/arch/nds32/include/asm/irqflags.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#include <asm/nds32.h> +#include <nds32_intrinsic.h> + +#define arch_local_irq_disable() \ + GIE_DISABLE(); + +#define arch_local_irq_enable() \ + GIE_ENABLE(); +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE; + GIE_DISABLE(); + return flags; +} + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE; + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + if(flags) + GIE_ENABLE(); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !flags; +} + +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} diff --git a/arch/nds32/include/asm/l2_cache.h b/arch/nds32/include/asm/l2_cache.h new file mode 100644 index 000000000..3ea48e19e --- /dev/null +++ b/arch/nds32/include/asm/l2_cache.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef L2_CACHE_H +#define L2_CACHE_H + +/* CCTL_CMD_OP */ +#define L2_CA_CONF_OFF 0x0 +#define L2_IF_CONF_OFF 0x4 +#define L2CC_SETUP_OFF 0x8 +#define L2CC_PROT_OFF 0xC +#define L2CC_CTRL_OFF 0x10 +#define L2_INT_EN_OFF 0x20 +#define L2_STA_OFF 0x24 +#define RDERR_ADDR_OFF 0x28 +#define WRERR_ADDR_OFF 0x2c +#define EVDPTERR_ADDR_OFF 0x30 +#define IMPL3ERR_ADDR_OFF 0x34 +#define L2_CNT0_CTRL_OFF 0x40 +#define L2_EVNT_CNT0_OFF 0x44 +#define L2_CNT1_CTRL_OFF 0x48 +#define L2_EVNT_CNT1_OFF 0x4c +#define L2_CCTL_CMD_OFF 0x60 +#define L2_CCTL_STATUS_OFF 0x64 +#define L2_LINE_TAG_OFF 0x68 +#define L2_LINE_DPT_OFF 0x70 + +#define CCTL_CMD_L2_IX_INVAL 0x0 +#define CCTL_CMD_L2_PA_INVAL 0x1 +#define CCTL_CMD_L2_IX_WB 0x2 +#define CCTL_CMD_L2_PA_WB 0x3 +#define CCTL_CMD_L2_PA_WBINVAL 0x5 +#define CCTL_CMD_L2_SYNC 0xa + +/* CCTL_CMD_TYPE */ +#define CCTL_SINGLE_CMD 0 +#define CCTL_BLOCK_CMD 0x10 +#define CCTL_ALL_CMD 0x10 + +/****************************************************************************** + * L2_CA_CONF (Cache architecture configuration) + 
*****************************************************************************/ +#define L2_CA_CONF_offL2SET 0 +#define L2_CA_CONF_offL2WAY 4 +#define L2_CA_CONF_offL2CLSZ 8 +#define L2_CA_CONF_offL2DW 11 +#define L2_CA_CONF_offL2PT 14 +#define L2_CA_CONF_offL2VER 16 + +#define L2_CA_CONF_mskL2SET (0xFUL << L2_CA_CONF_offL2SET) +#define L2_CA_CONF_mskL2WAY (0xFUL << L2_CA_CONF_offL2WAY) +#define L2_CA_CONF_mskL2CLSZ (0x7UL << L2_CA_CONF_offL2CLSZ) +#define L2_CA_CONF_mskL2DW (0x7UL << L2_CA_CONF_offL2DW) +#define L2_CA_CONF_mskL2PT (0x3UL << L2_CA_CONF_offL2PT) +#define L2_CA_CONF_mskL2VER (0xFFFFUL << L2_CA_CONF_offL2VER) + +/****************************************************************************** + * L2CC_SETUP (L2CC Setup register) + *****************************************************************************/ +#define L2CC_SETUP_offPART 0 +#define L2CC_SETUP_mskPART (0x3UL << L2CC_SETUP_offPART) +#define L2CC_SETUP_offDDLATC 4 +#define L2CC_SETUP_mskDDLATC (0x3UL << L2CC_SETUP_offDDLATC) +#define L2CC_SETUP_offTDLATC 8 +#define L2CC_SETUP_mskTDLATC (0x3UL << L2CC_SETUP_offTDLATC) + +/****************************************************************************** + * L2CC_PROT (L2CC Protect register) + *****************************************************************************/ +#define L2CC_PROT_offMRWEN 31 +#define L2CC_PROT_mskMRWEN (0x1UL << L2CC_PROT_offMRWEN) + +/****************************************************************************** + * L2CC_CTRL (L2CC Control register) + *****************************************************************************/ +#define L2CC_CTRL_offEN 31 +#define L2CC_CTRL_mskEN (0x1UL << L2CC_CTRL_offEN) + +/****************************************************************************** + * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n) + *****************************************************************************/ +#define L2_CCTL_STATUS_offCMD_COMP 31 +#define L2_CCTL_STATUS_mskCMD_COMP (0x1 << L2_CCTL_STATUS_offCMD_COMP) + +extern void __iomem *atl2c_base; +#include <linux/smp.h> +#include <asm/io.h> +#include <asm/bitfield.h> + +#define L2C_R_REG(offset) readl(atl2c_base + offset) +#define L2C_W_REG(offset, value) writel(value, atl2c_base + offset) + +#define L2_CMD_RDY() \ + do{;}while((L2C_R_REG(L2_CCTL_STATUS_OFF) & L2_CCTL_STATUS_mskCMD_COMP) == 0) + +static inline unsigned long L2_CACHE_SET(void) +{ + return 64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >> + L2_CA_CONF_offL2SET); +} + +static inline unsigned long L2_CACHE_WAY(void) +{ + return 1 + + ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >> + L2_CA_CONF_offL2WAY); +} + +static inline unsigned long L2_CACHE_LINE_SIZE(void) +{ + + return 4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >> + L2_CA_CONF_offL2CLSZ); +} + +static inline unsigned long GET_L2CC_CTRL_CPU(unsigned long cpu) +{ + if (cpu == smp_processor_id()) + return L2C_R_REG(L2CC_CTRL_OFF); + return L2C_R_REG(L2CC_CTRL_OFF + (cpu << 8)); +} + +static inline void SET_L2CC_CTRL_CPU(unsigned long cpu, unsigned long val) +{ + if (cpu == smp_processor_id()) + L2C_W_REG(L2CC_CTRL_OFF, val); + else + L2C_W_REG(L2CC_CTRL_OFF + (cpu << 8), val); +} + +static inline unsigned long GET_L2CC_STATUS_CPU(unsigned long cpu) +{ + if (cpu == smp_processor_id()) + return L2C_R_REG(L2_CCTL_STATUS_OFF); + return L2C_R_REG(L2_CCTL_STATUS_OFF + (cpu << 8)); +} +#endif diff --git a/arch/nds32/include/asm/linkage.h b/arch/nds32/include/asm/linkage.h new file mode 
100644 index 000000000..a696469ab --- /dev/null +++ b/arch/nds32/include/asm/linkage.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +/* This file is required by include/linux/linkage.h */ +#define __ALIGN .align 2 +#define __ALIGN_STR ".align 2" + +#endif diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h new file mode 100644 index 000000000..940d32842 --- /dev/null +++ b/arch/nds32/include/asm/memory.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_MEMORY_H +#define __ASM_NDS32_MEMORY_H + +#include <linux/compiler.h> +#include <linux/sizes.h> + +#ifndef __ASSEMBLY__ +#include <asm/page.h> +#endif + +#ifndef PHYS_OFFSET +#define PHYS_OFFSET (0x0) +#endif + +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area + */ +#define TASK_SIZE ((CONFIG_PAGE_OFFSET) - (SZ_32M)) +#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_32M) +#define PAGE_OFFSET (CONFIG_PAGE_OFFSET) + +/* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. + */ +#ifndef __virt_to_phys +#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) +#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif + +/* + * The module space lives between the addresses given by TASK_SIZE + * and PAGE_OFFSET - it must be within 32MB of the kernel text. + */ +#define MODULES_END (PAGE_OFFSET) +#define MODULES_VADDR (MODULES_END - SZ_32M) + +#if TASK_SIZE > MODULES_VADDR +#error Top of user space clashes with start of module space +#endif + +#ifndef __ASSEMBLY__ + +/* + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0. + * + * This is the PFN of the first RAM page in the kernel + * direct-mapped view. We assume this is the first page + * of RAM in the mem_map as well. + */ +#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) + +/* + * Drivers should NOT use these either. + */ +#define __pa(x) __virt_to_phys((unsigned long)(x)) +#define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) + +/* + * Conversion between a struct page and a physical address. + * + * Note: when converting an unknown physical address to a + * struct page, the resulting pointer must be validated + * using VALID_PAGE(). It must return an invalid struct page + * for any physical address not corresponding to a system + * RAM address. + * + * pfn_valid(pfn) indicates whether a PFN number is valid + * + * virt_to_page(k) convert a _valid_ virtual address to struct page * + * virt_addr_valid(k) indicates whether a virtual address is valid + */ +#ifndef CONFIG_DISCONTIGMEM + +#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET +#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) + +#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) +#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) + +#else /* CONFIG_DISCONTIGMEM */ +#error CONFIG_DISCONTIGMEM is not supported yet. 
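+/* + * Editorial example (a sketch, not part of the original patch) of how the + * flat-memory helpers above compose: for any address that passes + * virt_addr_valid(), + * + * struct page *pg = virt_to_page(kaddr); + * unsigned long pa = __pa(kaddr); + * + * yields pa equal to page_to_phys(pg) plus the offset of kaddr within its + * page. + */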
+#endif /* !CONFIG_DISCONTIGMEM */ + +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#endif + +#include <asm-generic/memory_model.h> + +#endif diff --git a/arch/nds32/include/asm/mmu.h b/arch/nds32/include/asm/mmu.h new file mode 100644 index 000000000..89d63afee --- /dev/null +++ b/arch/nds32/include/asm/mmu.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_MMU_H +#define __NDS32_MMU_H + +typedef struct { + unsigned int id; + void *vdso; +} mm_context_t; + +#endif diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h new file mode 100644 index 000000000..b8fd3d189 --- /dev/null +++ b/arch/nds32/include/asm/mmu_context.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_MMU_CONTEXT_H +#define __ASM_NDS32_MMU_CONTEXT_H + +#include <linux/spinlock.h> +#include <asm/tlbflush.h> +#include <asm/proc-fns.h> +#include <asm-generic/mm_hooks.h> + +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context.id = 0; + return 0; +} + +#define destroy_context(mm) do { } while(0) + +#define CID_BITS 9 +extern spinlock_t cid_lock; +extern unsigned int cpu_last_cid; + +static inline void __new_context(struct mm_struct *mm) +{ + unsigned int cid; + unsigned long flags; + + spin_lock_irqsave(&cid_lock, flags); + cid = cpu_last_cid; + cpu_last_cid += 1 << TLB_MISC_offCID; + if (cpu_last_cid == 0) + cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS; + + if ((cid & TLB_MISC_mskCID) == 0) + flush_tlb_all(); + spin_unlock_irqrestore(&cid_lock, flags); + + mm->context.id = cid; +} + +static inline void check_context(struct mm_struct *mm) +{ + if (unlikely + ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS)) + __new_context(mm); +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { + check_context(next); + cpu_switch_mm(next); + } +} + +#define deactivate_mm(tsk,mm) do { } while (0) +#define activate_mm(prev,next) switch_mm(prev, next, NULL) + +#endif diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h new file mode 100644 index 000000000..4994f6a9e --- /dev/null +++ b/arch/nds32/include/asm/nds32.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASM_NDS32_NDS32_H_ +#define _ASM_NDS32_NDS32_H_ + +#include <asm/bitfield.h> +#include <asm/cachectl.h> + +#ifndef __ASSEMBLY__ +#include <linux/init.h> +#include <asm/barrier.h> +#include <nds32_intrinsic.h> + +#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +#define FP_OFFSET (-3) +#else +#define FP_OFFSET (-2) +#endif +#define LP_OFFSET (-1) + +extern void __init early_trap_init(void); +static inline void GIE_ENABLE(void) +{ + mb(); + __nds32__gie_en(); +} + +static inline void GIE_DISABLE(void) +{ + mb(); + __nds32__gie_dis(); +} + +static inline unsigned long CACHE_SET(unsigned char cache) +{ + + if (cache == ICACHE) + return 64 << ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISET) >> + ICM_CFG_offISET); + else + return 64 << ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSET) >> + DCM_CFG_offDSET); +} + +static inline unsigned long 
CACHE_WAY(unsigned char cache) +{ + + if (cache == ICACHE) + return 1 + + ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskIWAY) >> ICM_CFG_offIWAY); + else + return 1 + + ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDWAY) >> DCM_CFG_offDWAY); +} + +static inline unsigned long CACHE_LINE_SIZE(unsigned char cache) +{ + + if (cache == ICACHE) + return 8 << + (((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISZ) >> ICM_CFG_offISZ) - 1); + else + return 8 << + (((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSZ) >> DCM_CFG_offDSZ) - 1); +} + +#endif /* __ASSEMBLY__ */ + +#define IVB_BASE PHYS_OFFSET /* in user space for intr/exc/trap/break table base, 64KB aligned. + * We define it at the start of physical memory. */ + +/* dispatched sub-entry exception handler numbering */ +#define RD_PROT 0 /* read protection */ +#define WRT_PROT 1 /* write protection */ +#define NOEXEC 2 /* non-executable */ +#define PAGE_MODIFY 3 /* page modified */ +#define ACC_BIT 4 /* access bit */ +#define RESVED_PTE 5 /* reserved PTE attribute */ +/* reserved 6 ~ 16 */ + +#endif /* _ASM_NDS32_NDS32_H_ */ diff --git a/arch/nds32/include/asm/nds32_fpu_inst.h b/arch/nds32/include/asm/nds32_fpu_inst.h new file mode 100644 index 000000000..1e4b86a90 --- /dev/null +++ b/arch/nds32/include/asm/nds32_fpu_inst.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2005-2018 Andes Technology Corporation */ + +#ifndef __NDS32_FPU_INST_H +#define __NDS32_FPU_INST_H + +#define cop0_op 0x35 + +/* + * COP0 field of opcodes. + */ +#define fs1_op 0x0 +#define fs2_op 0x4 +#define fd1_op 0x8 +#define fd2_op 0xc + +/* + * FS1 opcode. + */ +enum fs1 { + fadds_op, fsubs_op, fcpynss_op, fcpyss_op, + fmadds_op, fmsubs_op, fcmovns_op, fcmovzs_op, + fnmadds_op, fnmsubs_op, + fmuls_op = 0xc, fdivs_op, + fs1_f2op_op = 0xf +}; + +/* + * FS1/F2OP opcode. + */ +enum fs1_f2 { + fs2d_op, fsqrts_op, + fui2s_op = 0x8, fsi2s_op = 0xc, + fs2ui_op = 0x10, fs2ui_z_op = 0x14, + fs2si_op = 0x18, fs2si_z_op = 0x1c +}; + +/* + * FS2 opcode. + */ +enum fs2 { + fcmpeqs_op, fcmpeqs_e_op, fcmplts_op, fcmplts_e_op, + fcmples_op, fcmples_e_op, fcmpuns_op, fcmpuns_e_op +}; + +/* + * FD1 opcode. + */ +enum fd1 { + faddd_op, fsubd_op, fcpynsd_op, fcpysd_op, + fmaddd_op, fmsubd_op, fcmovnd_op, fcmovzd_op, + fnmaddd_op, fnmsubd_op, + fmuld_op = 0xc, fdivd_op, fd1_f2op_op = 0xf +}; + +/* + * FD1/F2OP opcode. + */ +enum fd1_f2 { + fd2s_op, fsqrtd_op, + fui2d_op = 0x8, fsi2d_op = 0xc, + fd2ui_op = 0x10, fd2ui_z_op = 0x14, + fd2si_op = 0x18, fd2si_z_op = 0x1c +}; + +/* + * FD2 opcode. 
+ */ +enum fd2 { + fcmpeqd_op, fcmpeqd_e_op, fcmpltd_op, fcmpltd_e_op, + fcmpled_op, fcmpled_e_op, fcmpund_op, fcmpund_e_op +}; + +#define NDS32Insn(x) x + +#define I_OPCODE_off 25 +#define NDS32Insn_OPCODE(x) (NDS32Insn(x) >> I_OPCODE_off) + +#define I_OPCODE_offRt 20 +#define I_OPCODE_mskRt (0x1fUL << I_OPCODE_offRt) +#define NDS32Insn_OPCODE_Rt(x) \ + ((NDS32Insn(x) & I_OPCODE_mskRt) >> I_OPCODE_offRt) + +#define I_OPCODE_offRa 15 +#define I_OPCODE_mskRa (0x1fUL << I_OPCODE_offRa) +#define NDS32Insn_OPCODE_Ra(x) \ + ((NDS32Insn(x) & I_OPCODE_mskRa) >> I_OPCODE_offRa) + +#define I_OPCODE_offRb 10 +#define I_OPCODE_mskRb (0x1fUL << I_OPCODE_offRb) +#define NDS32Insn_OPCODE_Rb(x) \ + ((NDS32Insn(x) & I_OPCODE_mskRb) >> I_OPCODE_offRb) + +#define I_OPCODE_offbit1014 10 +#define I_OPCODE_mskbit1014 (0x1fUL << I_OPCODE_offbit1014) +#define NDS32Insn_OPCODE_BIT1014(x) \ + ((NDS32Insn(x) & I_OPCODE_mskbit1014) >> I_OPCODE_offbit1014) + +#define I_OPCODE_offbit69 6 +#define I_OPCODE_mskbit69 (0xfUL << I_OPCODE_offbit69) +#define NDS32Insn_OPCODE_BIT69(x) \ + ((NDS32Insn(x) & I_OPCODE_mskbit69) >> I_OPCODE_offbit69) + +#define I_OPCODE_offCOP0 0 +#define I_OPCODE_mskCOP0 (0x3fUL << I_OPCODE_offCOP0) +#define NDS32Insn_OPCODE_COP0(x) \ + ((NDS32Insn(x) & I_OPCODE_mskCOP0) >> I_OPCODE_offCOP0) + +#endif /* __NDS32_FPU_INST_H */ diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h new file mode 100644 index 000000000..add33a7f0 --- /dev/null +++ b/arch/nds32/include/asm/page.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005-2017 Andes Technology Corporation + */ + +#ifndef _ASMNDS32_PAGE_H +#define _ASMNDS32_PAGE_H + +#ifdef CONFIG_ANDES_PAGE_SIZE_4KB +#define PAGE_SHIFT 12 +#endif +#ifdef CONFIG_ANDES_PAGE_SIZE_8KB +#define PAGE_SHIFT 13 +#endif +#include <linux/const.h> +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +struct page; +struct vm_area_struct; +#ifdef CONFIG_CPU_CACHE_ALIASING +extern void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +extern void clear_user_highpage(struct page *page, unsigned long vaddr); + +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *to); +void clear_user_page(void *addr, unsigned long vaddr, struct page *page); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define clear_user_highpage clear_user_highpage +#else +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) +#endif + +void clear_page(void *page); +void copy_page(void *to, void *from); + +typedef unsigned long pte_t; +typedef unsigned long pgd_t; +typedef unsigned long pgprot_t; + +#define pte_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +typedef struct page *pgtable_t; + +#include <asm/memory.h> +#include <asm-generic/getorder.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif diff --git a/arch/nds32/include/asm/perf_event.h b/arch/nds32/include/asm/perf_event.h new file mode 100644 index 000000000..fcdff02ac --- /dev/null +++ b/arch/nds32/include/asm/perf_event.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2008-2018 Andes Technology Corporation */ + +#ifndef __ASM_PERF_EVENT_H +#define __ASM_PERF_EVENT_H + +/* + * This file is required by Perf, + * please refer to 
tools/perf/design.txt for more details + */ +struct pt_regs; +unsigned long perf_instruction_pointer(struct pt_regs *regs); +unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) + +#endif diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h new file mode 100644 index 000000000..85c117347 --- /dev/null +++ b/arch/nds32/include/asm/pgalloc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMNDS32_PGALLOC_H +#define _ASMNDS32_PGALLOC_H + +#include <asm/processor.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm/proc-fns.h> + +#define __HAVE_ARCH_PTE_ALLOC_ONE +#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */ + +/* + * Since we have only two-level page tables, these are trivial + */ +#define pmd_pgtable(pmd) pmd_page(pmd) + +extern pgd_t *pgd_alloc(struct mm_struct *mm); +extern void pgd_free(struct mm_struct *mm, pgd_t * pgd); + +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +{ + pgtable_t pte; + + pte = __pte_alloc_one(mm, GFP_PGTABLE_USER); + if (pte) + cpu_dcache_wb_page((unsigned long)page_address(pte)); + + return pte; +} + +/* + * Populate the pmdp entry with a pointer to the pte. This pmd is part + * of the mm address space. + * + * Ensure that we always set both PMD entries. + */ +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmdp, pte_t * ptep) +{ + unsigned long pte_ptr = (unsigned long)ptep; + unsigned long pmdval; + + BUG_ON(mm != &init_mm); + + /* + * The pmd must be loaded with the physical + * address of the PTE table + */ + pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; + set_pmd(pmdp, __pmd(pmdval)); +} + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t * pmdp, pgtable_t ptep) +{ + unsigned long pmdval; + + BUG_ON(mm == &init_mm); + + pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; + set_pmd(pmdp, __pmd(pmdval)); +} + +#endif diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h new file mode 100644 index 000000000..419f984ee --- /dev/null +++ b/arch/nds32/include/asm/pgtable.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMNDS32_PGTABLE_H +#define _ASMNDS32_PGTABLE_H + +#include <asm-generic/pgtable-nopmd.h> +#include <linux/sizes.h> + +#include <asm/memory.h> +#include <asm/nds32.h> +#ifndef __ASSEMBLY__ +#include <asm/fixmap.h> +#include <nds32_intrinsic.h> +#endif + +#ifdef CONFIG_ANDES_PAGE_SIZE_4KB +#define PGDIR_SHIFT 22 +#define PTRS_PER_PGD 1024 +#define PTRS_PER_PTE 1024 +#endif + +#ifdef CONFIG_ANDES_PAGE_SIZE_8KB +#define PGDIR_SHIFT 24 +#define PTRS_PER_PGD 256 +#define PTRS_PER_PTE 2048 +#endif + +#ifndef __ASSEMBLY__ +extern void __pte_error(const char *file, int line, unsigned long val); +extern void __pgd_error(const char *file, int line, unsigned long val); + +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) +#endif /* !__ASSEMBLY__ */ + +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * This is the lowest virtual address we can permit any user space + * mapping to be mapped at. This is particularly important for + * non-high vector CPUs. 
+ */ +#define FIRST_USER_ADDRESS 0x8000 + +#ifdef CONFIG_HIGHMEM +#define CONSISTENT_BASE ((PKMAP_BASE) - (SZ_2M)) +#define CONSISTENT_END (PKMAP_BASE) +#else +#define CONSISTENT_BASE (FIXADDR_START - SZ_2M) +#define CONSISTENT_END (FIXADDR_START) +#endif +#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) + +#ifdef CONFIG_HIGHMEM +#ifndef __ASSEMBLY__ +#include <asm/highmem.h> +#endif +#endif + +#define VMALLOC_RESERVE SZ_128M +#define VMALLOC_END (CONSISTENT_BASE - PAGE_SIZE) +#define VMALLOC_START ((VMALLOC_END) - VMALLOC_RESERVE) +#define VMALLOC_VMADDR(x) ((unsigned long)(x)) +#define MAXMEM __pa(VMALLOC_START) +#define MAXMEM_PFN PFN_DOWN(MAXMEM) + +#define FIRST_USER_PGD_NR 0 +#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) + FIRST_USER_PGD_NR) + +/* L2 PTE */ +#define _PAGE_V (1UL << 0) + +#define _PAGE_M_XKRW (0UL << 1) +#define _PAGE_M_UR_KR (1UL << 1) +#define _PAGE_M_UR_KRW (2UL << 1) +#define _PAGE_M_URW_KRW (3UL << 1) +#define _PAGE_M_KR (5UL << 1) +#define _PAGE_M_KRW (7UL << 1) + +#define _PAGE_D (1UL << 4) +#define _PAGE_E (1UL << 5) +#define _PAGE_A (1UL << 6) +#define _PAGE_G (1UL << 7) + +#define _PAGE_C_DEV (0UL << 8) +#define _PAGE_C_DEV_WB (1UL << 8) +#define _PAGE_C_MEM (2UL << 8) +#define _PAGE_C_MEM_SHRD_WB (4UL << 8) +#define _PAGE_C_MEM_SHRD_WT (5UL << 8) +#define _PAGE_C_MEM_WB (6UL << 8) +#define _PAGE_C_MEM_WT (7UL << 8) + +#define _PAGE_L (1UL << 11) + +#define _HAVE_PAGE_L (_PAGE_L) +#define _PAGE_FILE (1UL << 1) +#define _PAGE_YOUNG 0 +#define _PAGE_M_MASK _PAGE_M_KRW +#define _PAGE_C_MASK _PAGE_C_MEM_WT + +#ifdef CONFIG_SMP +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH +#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WT +#else +#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WB +#endif +#else +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH +#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WT +#else +#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WB +#endif +#endif + +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH +#define _PAGE_CACHE _PAGE_C_MEM_WT +#else +#define _PAGE_CACHE _PAGE_C_MEM_WB +#endif + +#define _PAGE_IOREMAP \ + (_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) + +/* + * + Level 1 descriptor (PMD) + */ +#define PMD_TYPE_TABLE 0 + +#ifndef __ASSEMBLY__ + +#define _PAGE_USER_TABLE PMD_TYPE_TABLE +#define _PAGE_KERNEL_TABLE PMD_TYPE_TABLE + +#define PAGE_EXEC __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E) +#define PAGE_NONE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A) +#define PAGE_READ __pgprot(_PAGE_V | _PAGE_M_UR_KR) +#define PAGE_RDWR __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D) +#define PAGE_COPY __pgprot(_PAGE_V | _PAGE_M_UR_KR) + +#define PAGE_UXKRWX_V1 __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_UXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_URXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_UR_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) +#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD) +#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) +#endif /* __ASSEMBLY__ */ + +/* xwr */ +#define __P000 (PAGE_NONE | _PAGE_CACHE_SHRD) +#define __P001 (PAGE_READ | 
_PAGE_CACHE_SHRD) +#define __P010 (PAGE_COPY | _PAGE_CACHE_SHRD) +#define __P011 (PAGE_COPY | _PAGE_CACHE_SHRD) +#define __P100 (PAGE_EXEC | _PAGE_CACHE_SHRD) +#define __P101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) +#define __P110 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) +#define __P111 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) + +#define __S000 (PAGE_NONE | _PAGE_CACHE_SHRD) +#define __S001 (PAGE_READ | _PAGE_CACHE_SHRD) +#define __S010 (PAGE_RDWR | _PAGE_CACHE_SHRD) +#define __S011 (PAGE_RDWR | _PAGE_CACHE_SHRD) +#define __S100 (PAGE_EXEC | _PAGE_CACHE_SHRD) +#define __S101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) +#define __S110 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) +#define __S111 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) + +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern struct page *empty_zero_page; +extern void paging_init(void); +#define ZERO_PAGE(vaddr) (empty_zero_page) + +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) + +#define pte_none(pte) !(pte_val(pte)) +#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) +#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)); +} + +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) +/* + * Set a level 1 translation table entry, and clean it out of + * any caches such that the MMUs can load it correctly. + */ +static inline void set_pmd(pmd_t * pmdp, pmd_t pmd) +{ + + *pmdp = pmd; +#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) + __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (pmdp):"memory"); + __nds32__msync_all(); + __nds32__dsb(); +#endif +} + +/* + * Set a PTE and flush it out + */ +static inline void set_pte(pte_t * ptep, pte_t pte) +{ + + *ptep = pte; +#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) + __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (ptep):"memory"); + __nds32__msync_all(); + __nds32__dsb(); +#endif +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ + +/* + * pte_write: this page is writeable for user mode + * pte_read: this page is readable for user mode + * pte_kernel_write: this page is writeable for kernel mode + * + * We don't have pte_kernel_read because the kernel can always read. + */ + +#define pte_present(pte) (pte_val(pte) & _PAGE_V) +#define pte_write(pte) ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) +#define pte_read(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \ + ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ + ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)) +#define pte_kernel_write(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \ + ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ + ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \ + (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte))) +#define pte_exec(pte) (pte_val(pte) & _PAGE_E) +#define pte_dirty(pte) (pte_val(pte) & _PAGE_D) +#define pte_young(pte) (pte_val(pte) & _PAGE_YOUNG) + +/* + * The following only works if pte_present() is not true. 
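+ * + * Editorial round-trip sketch of the file-pte encoding defined below (the + * offset lives in bits 2..30 and _PAGE_FILE in bit 1): + * + * pte_t pte = pgoff_to_pte(pgoff); + * BUG_ON(!pte_file(pte)); + * BUG_ON(pte_to_pgoff(pte) != pgoff); (for pgoff below 1 << PTE_FILE_MAX_BITS) + *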
+ */ +#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 2) +#define pgoff_to_pte(x) __pte(((x) << 2) | _PAGE_FILE) + +#define PTE_FILE_MAX_BITS 29 + +#define PTE_BIT_FUNC(fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; + pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR; + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; + pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW; + return pte; +} + +PTE_BIT_FUNC(exprotect, &=~_PAGE_E); +PTE_BIT_FUNC(mkexec, |=_PAGE_E); +PTE_BIT_FUNC(mkclean, &=~_PAGE_D); +PTE_BIT_FUNC(mkdirty, |=_PAGE_D); +PTE_BIT_FUNC(mkold, &=~_PAGE_YOUNG); +PTE_BIT_FUNC(mkyoung, |=_PAGE_YOUNG); + +/* + * Mark the prot value as uncacheable and unbufferable. + */ +#define pgprot_noncached(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV) +#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV_WB) + +#define pmd_none(pmd) (pmd_val(pmd)&0x1) +#define pmd_present(pmd) (!pmd_none(pmd)) +#define pmd_bad(pmd) pmd_none(pmd) + +#define copy_pmd(pmdpd,pmdps) set_pmd((pmdpd), *(pmdps)) +#define pmd_clear(pmdp) set_pmd((pmdp), __pmd(1)) + +static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot) +{ + unsigned long ptr = (unsigned long)ptep; + pmd_t pmd; + + /* + * The pmd must be loaded with the physical + * address of the PTE table + */ + + pmd_val(pmd) = __virt_to_phys(ptr) | prot; + return pmd; +} + +#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) + +/* + * Permanent address of a page. We never have highmem, so this is trivial. + */ +#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_present(pgd) (1) +#define pgd_clear(pgdp) do { } while (0) + +#define page_pte_prot(page,prot) mk_pte(page, prot) +#define page_pte(page) mk_pte(page, __pgprot(0)) +/* + * L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2); + * L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE -1 )) << 2); + * PPN = (phys & 0xfffff000); + * +*/ + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + const unsigned long mask = 0xfff; + pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); + return pte; +} + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* Encode and decode a swap entry. + * + * We support up to 32GB of swap on 4k machines + */ +#define __swp_type(x) (((x).val >> 2) & 0x7f) +#define __swp_offset(x) ((x).val >> 9) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +#define kern_addr_valid(addr) (1) + +/* + * We provide our own arch_get_unmapped_area to cope with VIPT caches. 
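+ * + * Editorial sketch of the usual colouring trick such an implementation + * relies on (assumed here, not taken from this port's mmap code): shared + * mappings are placed so that equal file offsets land on equal cache + * colours, e.g. + * + * addr = ALIGN(addr, SHMLBA); + * addr += (pgoff << PAGE_SHIFT) & (SHMLBA - 1); + *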
+ */ +#define HAVE_ARCH_UNMAPPED_AREA + +/* + * remap a physical address `phys' of size `size' with page protection `prot' + * into virtual address `from' + */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASMNDS32_PGTABLE_H */ diff --git a/arch/nds32/include/asm/pmu.h b/arch/nds32/include/asm/pmu.h new file mode 100644 index 000000000..e1ac0b0b8 --- /dev/null +++ b/arch/nds32/include/asm/pmu.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2008-2018 Andes Technology Corporation */ + +#ifndef __ASM_PMU_H +#define __ASM_PMU_H + +#include <linux/interrupt.h> +#include <linux/perf_event.h> +#include <asm/unistd.h> +#include <asm/bitfield.h> + +/* Has special meaning for perf core implementation */ +#define HW_OP_UNSUPPORTED 0x0 +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0x0 + +/* Enough for both software and hardware defined events */ +#define SOFTWARE_EVENT_MASK 0xFF + +#define PFM_OFFSET_MAGIC_0 2 /* DO NOT START FROM 0 */ +#define PFM_OFFSET_MAGIC_1 (PFM_OFFSET_MAGIC_0 + 36) +#define PFM_OFFSET_MAGIC_2 (PFM_OFFSET_MAGIC_1 + 36) + +enum { PFMC0, PFMC1, PFMC2, MAX_COUNTERS }; + +u32 PFM_CTL_OVF[3] = { PFM_CTL_mskOVF0, PFM_CTL_mskOVF1, + PFM_CTL_mskOVF2 }; +u32 PFM_CTL_EN[3] = { PFM_CTL_mskEN0, PFM_CTL_mskEN1, + PFM_CTL_mskEN2 }; +u32 PFM_CTL_OFFSEL[3] = { PFM_CTL_offSEL0, PFM_CTL_offSEL1, + PFM_CTL_offSEL2 }; +u32 PFM_CTL_IE[3] = { PFM_CTL_mskIE0, PFM_CTL_mskIE1, PFM_CTL_mskIE2 }; +u32 PFM_CTL_KS[3] = { PFM_CTL_mskKS0, PFM_CTL_mskKS1, PFM_CTL_mskKS2 }; +u32 PFM_CTL_KU[3] = { PFM_CTL_mskKU0, PFM_CTL_mskKU1, PFM_CTL_mskKU2 }; +u32 PFM_CTL_SEL[3] = { PFM_CTL_mskSEL0, PFM_CTL_mskSEL1, PFM_CTL_mskSEL2 }; +/* + * Perf Events' indices + */ +#define NDS32_IDX_CYCLE_COUNTER 0 +#define NDS32_IDX_COUNTER0 1 +#define NDS32_IDX_COUNTER1 2 + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[MAX_COUNTERS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(MAX_COUNTERS)]; + + /* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. 
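+ * + * Editorial sketch of the intended pattern (the system-register name + * NDS32_SR_PFM_CTL is assumed here, not taken from this header): + * + * raw_spin_lock_irqsave(&events->pmu_lock, flags); + * val = __nds32__mfsr(NDS32_SR_PFM_CTL); + * __nds32__mtsr(val | PFM_CTL_EN[idx], NDS32_SR_PFM_CTL); + * raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + *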
+ */ + raw_spinlock_t pmu_lock; +}; + +struct nds32_pmu { + struct pmu pmu; + cpumask_t active_irqs; + char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct perf_event *event); + void (*disable)(struct perf_event *event); + int (*get_event_idx)(struct pmu_hw_events *hw_events, + struct perf_event *event); + int (*set_event_filter)(struct hw_perf_event *evt, + struct perf_event_attr *attr); + u32 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u32 val); + void (*start)(struct nds32_pmu *nds32_pmu); + void (*stop)(struct nds32_pmu *nds32_pmu); + void (*reset)(void *data); + int (*request_irq)(struct nds32_pmu *nds32_pmu, irq_handler_t handler); + void (*free_irq)(struct nds32_pmu *nds32_pmu); + int (*map_event)(struct perf_event *event); + int num_events; + atomic_t active_events; + u64 max_period; + struct platform_device *plat_device; + struct pmu_hw_events *(*get_hw_events)(void); +}; + +#define to_nds32_pmu(p) (container_of(p, struct nds32_pmu, pmu)) + +int nds32_pmu_register(struct nds32_pmu *nds32_pmu, int type); + +u64 nds32_pmu_event_update(struct perf_event *event); + +int nds32_pmu_event_set_period(struct perf_event *event); + +/* + * Common NDS32 SPAv3 event types + * + * Note: An implementation may not be able to count all of these events + * but the encodings are considered to be `reserved' in the case that + * they are not available. + * + * SEL_TOTAL_CYCLES is given an offset because ZERO is defined as the + * NOT_SUPPORTED EVENT mapping in generic perf code. + * You will need to deal with this in the event-write implementation. + */ +enum spav3_counter_0_perf_types { + SPAV3_0_SEL_BASE = -1 + PFM_OFFSET_MAGIC_0, /* counting symbol */ + SPAV3_0_SEL_TOTAL_CYCLES = 0 + PFM_OFFSET_MAGIC_0, + SPAV3_0_SEL_COMPLETED_INSTRUCTION = 1 + PFM_OFFSET_MAGIC_0, + SPAV3_0_SEL_LAST /* counting symbol */ +}; + +enum spav3_counter_1_perf_types { + SPAV3_1_SEL_BASE = -1 + PFM_OFFSET_MAGIC_1, /* counting symbol */ + SPAV3_1_SEL_TOTAL_CYCLES = 0 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_COMPLETED_INSTRUCTION = 1 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_CONDITIONAL_BRANCH = 2 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_TAKEN_CONDITIONAL_BRANCH = 3 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_PREFETCH_INSTRUCTION = 4 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_RET_INST = 5 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_JR_INST = 6 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_JAL_JRAL_INST = 7 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_NOP_INST = 8 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_SCW_INST = 9 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_ISB_DSB_INST = 10 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_CCTL_INST = 11 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_TAKEN_INTERRUPTS = 12 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_LOADS_COMPLETED = 13 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_UITLB_ACCESS = 14 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_UDTLB_ACCESS = 15 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_MTLB_ACCESS = 16 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_CODE_CACHE_ACCESS = 17 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_DATA_DEPENDENCY_STALL_CYCLES = 18 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_DATA_CACHE_MISS_STALL_CYCLES = 19 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_DATA_CACHE_ACCESS = 20 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_DATA_CACHE_MISS = 21 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_LOAD_DATA_CACHE_ACCESS = 22 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_STORE_DATA_CACHE_ACCESS = 23 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_ILM_ACCESS = 24 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_LSU_BIU_CYCLES = 25 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_HPTWK_BIU_CYCLES = 26 + PFM_OFFSET_MAGIC_1, 
+ SPAV3_1_SEL_DMA_BIU_CYCLES = 27 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_CODE_CACHE_FILL_BIU_CYCLES = 28 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_LEGAL_UNALIGN_DCACHE_ACCESS = 29 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_PUSH25 = 30 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_SYSCALLS_INST = 31 + PFM_OFFSET_MAGIC_1, + SPAV3_1_SEL_LAST /* counting symbol */ +}; + +enum spav3_counter_2_perf_types { + SPAV3_2_SEL_BASE = -1 + PFM_OFFSET_MAGIC_2, /* counting symbol */ + SPAV3_2_SEL_TOTAL_CYCLES = 0 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_COMPLETED_INSTRUCTION = 1 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_CONDITIONAL_BRANCH_MISPREDICT = 2 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_TAKEN_CONDITIONAL_BRANCH_MISPREDICT = + 3 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_PREFETCH_INSTRUCTION_CACHE_HIT = 4 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_RET_MISPREDICT = 5 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_IMMEDIATE_J_INST = 6 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_MULTIPLY_INST = 7 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_16_BIT_INST = 8 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_FAILED_SCW_INST = 9 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_LD_AFTER_ST_CONFLICT_REPLAYS = 10 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_TAKEN_EXCEPTIONS = 12 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_STORES_COMPLETED = 13 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_UITLB_MISS = 14 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_UDTLB_MISS = 15 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_MTLB_MISS = 16 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_CODE_CACHE_MISS = 17 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_EMPTY_INST_QUEUE_STALL_CYCLES = 18 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_DATA_WRITE_BACK = 19 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_DATA_CACHE_MISS = 21 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_LOAD_DATA_CACHE_MISS = 22 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_STORE_DATA_CACHE_MISS = 23 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_DLM_ACCESS = 24 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_LSU_BIU_REQUEST = 25 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_HPTWK_BIU_REQUEST = 26 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_DMA_BIU_REQUEST = 27 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_CODE_CACHE_FILL_BIU_REQUEST = 28 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_EXTERNAL_EVENTS = 29 + PFM_OFFSET_MAGIC_2, + SPAV3_1_SEL_POP25 = 30 + PFM_OFFSET_MAGIC_2, + SPAV3_2_SEL_LAST /* counting symbol */ +}; + +/* Get converted event counter index */ +static inline int get_converted_event_idx(unsigned long event) +{ + int idx; + + if ((event) > SPAV3_0_SEL_BASE && event < SPAV3_0_SEL_LAST) { + idx = 0; + } else if ((event) > SPAV3_1_SEL_BASE && event < SPAV3_1_SEL_LAST) { + idx = 1; + } else if ((event) > SPAV3_2_SEL_BASE && event < SPAV3_2_SEL_LAST) { + idx = 2; + } else { + pr_err("GET_CONVERTED_EVENT_IDX PFM counter range error\n"); + return -EPERM; + } + + return idx; +} + +/* Get converted hardware event number */ +static inline u32 get_converted_evet_hw_num(u32 event) +{ + if (event > SPAV3_0_SEL_BASE && event < SPAV3_0_SEL_LAST) + event -= PFM_OFFSET_MAGIC_0; + else if (event > SPAV3_1_SEL_BASE && event < SPAV3_1_SEL_LAST) + event -= PFM_OFFSET_MAGIC_1; + else if (event > SPAV3_2_SEL_BASE && event < SPAV3_2_SEL_LAST) + event -= PFM_OFFSET_MAGIC_2; + else if (event != 0) + pr_err("GET_CONVERTED_EVENT_HW_NUM PFM counter range error\n"); + + return event; +} + +/* + * NDS32 HW events mapping + * + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. 
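+ * + * Worked example (editorial): PERF_COUNT_HW_CPU_CYCLES maps to + * SPAV3_0_SEL_TOTAL_CYCLES, i.e. 0 + PFM_OFFSET_MAGIC_0 == 2; then + * get_converted_event_idx(2) selects counter 0, and + * get_converted_evet_hw_num(2) subtracts PFM_OFFSET_MAGIC_0 again to + * recover the raw selector 0 that is programmed into the hardware. + *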
+ +/* + * NDS32 HW events mapping + * + * The hardware events that we support. We do support cache operations but + * we have Harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned int nds32_pfm_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = SPAV3_0_SEL_TOTAL_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = SPAV3_1_SEL_COMPLETED_INSTRUCTION, + [PERF_COUNT_HW_CACHE_REFERENCES] = SPAV3_1_SEL_DATA_CACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = SPAV3_2_SEL_DATA_CACHE_MISS, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED +}; + +static const unsigned int nds32_pfm_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_LOAD_DATA_CACHE_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_LOAD_DATA_CACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_STORE_DATA_CACHE_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_STORE_DATA_CACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_CODE_CACHE_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_CODE_CACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_CODE_CACHE_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_CODE_CACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + /* TODO: L2CC */ + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + /* The NDS32 PMU cannot count TLB hits and misses separately for + * reads and writes; it only counts combined accesses and misses. + * Those combined counts are therefore reported under the READ + * indices, which is as close as the hardware allows. + */ + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_UDTLB_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_UDTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + SPAV3_1_SEL_UITLB_ACCESS, + [C(RESULT_MISS)] = + SPAV3_2_SEL_UITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { /* branch prediction unit: not supported */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { /* NUMA node memory events: not supported 
*/ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = + CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = + CACHE_OP_UNSUPPORTED, + }, + }, +}; + +int nds32_pmu_map_event(struct perf_event *event, + const unsigned int (*event_map)[PERF_COUNT_HW_MAX], + const unsigned int (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask); + +#endif /* __ASM_PMU_H */ diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h new file mode 100644 index 000000000..27c617fa7 --- /dev/null +++ b/arch/nds32/include/asm/proc-fns.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_PROCFNS_H__ +#define __NDS32_PROCFNS_H__ + +#ifdef __KERNEL__ +#include <asm/page.h> + +struct mm_struct; +struct vm_area_struct; +extern void cpu_proc_init(void); +extern void cpu_proc_fin(void); +extern void cpu_do_idle(void); +extern void cpu_reset(unsigned long reset); +extern void cpu_switch_mm(struct mm_struct *mm); + +extern void cpu_dcache_inval_all(void); +extern void cpu_dcache_wbinval_all(void); +extern void cpu_dcache_inval_page(unsigned long page); +extern void cpu_dcache_wb_page(unsigned long page); +extern void cpu_dcache_wbinval_page(unsigned long page); +extern void cpu_dcache_inval_range(unsigned long start, unsigned long end); +extern void cpu_dcache_wb_range(unsigned long start, unsigned long end); +extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end); + +extern void cpu_icache_inval_all(void); +extern void cpu_icache_inval_page(unsigned long page); +extern void cpu_icache_inval_range(unsigned long start, unsigned long end); + +extern void cpu_cache_wbinval_page(unsigned long page, int flushi); +extern void cpu_cache_wbinval_range(unsigned long start, + unsigned long end, int flushi); +extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, bool flushi, + bool wbd); + +extern void cpu_dma_wb_range(unsigned long start, unsigned long end); +extern void cpu_dma_inval_range(unsigned long start, unsigned long end); +extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end); + +#endif /* __KERNEL__ */ +#endif /* __NDS32_PROCFNS_H__ */ diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h new file mode 100644 index 000000000..b82369c76 --- /dev/null +++ b/arch/nds32/include/asm/processor.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_PROCESSOR_H +#define __ASM_NDS32_PROCESSOR_H + +#ifdef __KERNEL__ + +#include <asm/ptrace.h> +#include <asm/types.h> +#include <asm/sigcontext.h> + +#define KERNEL_STACK_SIZE PAGE_SIZE +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE + +struct cpu_context { + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long fp; + unsigned long pc; + unsigned long sp; +}; + +struct thread_struct { + struct cpu_context cpu_context; /* cpu context */ + /* fault info */ + unsigned long address; + unsigned long trap_no; + unsigned long error_code; + + 
struct fpu_struct fpu; +}; + +#define INIT_THREAD { } + +#ifdef __NDS32_EB__ +#define PSW_DE PSW_mskBE +#else +#define PSW_DE 0x0 +#endif + +#ifdef CONFIG_WBNA +#define PSW_valWBNA PSW_mskWBNA +#else +#define PSW_valWBNA 0x0 +#endif + +#ifdef CONFIG_HWZOL +#define PSW_valINIT (PSW_CPL_ANY | PSW_mskAEN | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE) +#else +#define PSW_valINIT (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE) +#endif + +#define start_thread(regs,pc,stack) \ +({ \ + memzero(regs, sizeof(struct pt_regs)); \ + forget_syscall(regs); \ + regs->ipsw = PSW_valINIT; \ + regs->ir0 = (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_SYSTEM | PSW_INTL_1); \ + regs->ipc = pc; \ + regs->sp = stack; \ +}) + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +#define release_thread(thread) do { } while(0) +#if IS_ENABLED(CONFIG_FPU) +#if !IS_ENABLED(CONFIG_UNLAZU_FPU) +extern struct task_struct *last_task_used_math; +#endif +#endif + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +unsigned long get_wchan(struct task_struct *p); + +#define cpu_relax() barrier() + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ + - 8) - 1) + +/* + * Create a new kernel thread + */ +extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags); + +#define KSTK_EIP(tsk) instruction_pointer(task_pt_regs(tsk)) +#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) + +#endif + +#endif /* __ASM_NDS32_PROCESSOR_H */ diff --git a/arch/nds32/include/asm/ptrace.h b/arch/nds32/include/asm/ptrace.h new file mode 100644 index 000000000..919ee2236 --- /dev/null +++ b/arch/nds32/include/asm/ptrace.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_PTRACE_H +#define __ASM_NDS32_PTRACE_H + +#include <uapi/asm/ptrace.h> + +/* + * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing + * a syscall -- i.e., its most recent entry into the kernel from + * userspace was not via syscall, or otherwise a tracer cancelled the + * syscall. + * + * This must have the value -1, for ABI compatibility with ptrace etc. 
+ */ +#define NO_SYSCALL (-1) +#ifndef __ASSEMBLY__ +#include <linux/types.h> + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + long uregs[26]; + long fp; + long gp; + long lp; + long sp; + long ipc; +#if defined(CONFIG_HWZOL) + long lb; + long le; + long lc; +#else + long dummy[3]; +#endif + long syscallno; + }; + }; + long orig_r0; + long ir0; + long ipsw; + long pipsw; + long pipc; + long pp0; + long pp1; + long fucop_ctl; + long osp; +}; + +static inline bool in_syscall(struct pt_regs const *regs) +{ + return regs->syscallno != NO_SYSCALL; +} + +static inline void forget_syscall(struct pt_regs *regs) +{ + regs->syscallno = NO_SYSCALL; +} +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->uregs[0]; +} +extern void show_regs(struct pt_regs *); +/* Avoid circular header include via sched.h */ +struct task_struct; + +#define arch_has_single_step() (1) +#define user_mode(regs) (((regs)->ipsw & PSW_mskPOM) == 0) +#define interrupts_enabled(regs) (!!((regs)->ipsw & PSW_mskGIE)) +#define user_stack_pointer(regs) ((regs)->sp) +#define instruction_pointer(regs) ((regs)->ipc) +#define profile_pc(regs) instruction_pointer(regs) + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/nds32/include/asm/sfp-machine.h b/arch/nds32/include/asm/sfp-machine.h new file mode 100644 index 000000000..b1a5caa33 --- /dev/null +++ b/arch/nds32/include/asm/sfp-machine.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2005-2018 Andes Technology Corporation */ + +#include <asm/bitfield.h> + +#define _FP_W_TYPE_SIZE 32 +#define _FP_W_TYPE unsigned long +#define _FP_WS_TYPE signed long +#define _FP_I_TYPE long + +#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) +#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) + +#define _FP_MUL_MEAT_S(R, X, Y) \ + _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S, R, X, Y, umul_ppmm) +#define _FP_MUL_MEAT_D(R, X, Y) \ + _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm) +#define _FP_MUL_MEAT_Q(R, X, Y) \ + _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) + +#define _FP_MUL_MEAT_DW_S(R, X, Y) \ + _FP_MUL_MEAT_DW_1_wide(_FP_WFRACBITS_S, R, X, Y, umul_ppmm) +#define _FP_MUL_MEAT_DW_D(R, X, Y) \ + _FP_MUL_MEAT_DW_2_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm) + +#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_udiv_norm(S, R, X, Y) +#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_2_udiv(D, R, X, Y) + +#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) +#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 +#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 +#define _FP_NANSIGN_S 0 +#define _FP_NANSIGN_D 0 +#define _FP_NANSIGN_Q 0 + +#define _FP_KEEPNANFRACP 1 +#define _FP_QNANNEGATEDP 0 + +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ +do { \ + if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \ + && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R, Y); \ + } else { \ + R##_s = X##_s; \ + _FP_FRAC_COPY_##wc(R, X); \ + } \ + R##_c = FP_CLS_NAN; \ +} while (0) + +#define __FPU_FPCSR (current->thread.fpu.fpcsr) + +/* Obtain the current rounding mode. 
*/ +#define FP_ROUNDMODE \ +({ \ + __FPU_FPCSR & FPCSR_mskRM; \ +}) + +#define FP_RND_NEAREST 0 +#define FP_RND_PINF 1 +#define FP_RND_MINF 2 +#define FP_RND_ZERO 3 + +#define FP_EX_INVALID FPCSR_mskIVO +#define FP_EX_DIVZERO FPCSR_mskDBZ +#define FP_EX_OVERFLOW FPCSR_mskOVF +#define FP_EX_UNDERFLOW FPCSR_mskUDF +#define FP_EX_INEXACT FPCSR_mskIEX + +#define SF_CEQ 2 +#define SF_CLT 1 +#define SF_CGT 3 +#define SF_CUN 4 + +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN__ +#define __BYTE_ORDER __BIG_ENDIAN +#define __LITTLE_ENDIAN 0 +#else +#define __BYTE_ORDER __LITTLE_ENDIAN +#define __BIG_ENDIAN 0 +#endif + +#define abort() do { } while (0) +#define umul_ppmm(w1, w0, u, v) \ +do { \ + UWtype __x0, __x1, __x2, __x3; \ + UHWtype __ul, __vl, __uh, __vh; \ + \ + __ul = __ll_lowpart(u); \ + __uh = __ll_highpart(u); \ + __vl = __ll_lowpart(v); \ + __vh = __ll_highpart(v); \ + \ + __x0 = (UWtype) __ul * __vl; \ + __x1 = (UWtype) __ul * __vh; \ + __x2 = (UWtype) __uh * __vl; \ + __x3 = (UWtype) __uh * __vh; \ + \ + __x1 += __ll_highpart(__x0); \ + __x1 += __x2; \ + if (__x1 < __x2) \ + __x3 += __ll_B; \ + \ + (w1) = __x3 + __ll_highpart(__x1); \ + (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0); \ +} while (0) + +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ +do { \ + UWtype __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ +} while (0) + +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ +do { \ + UWtype __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - (__x > (al)); \ + (sl) = __x; \ +} while (0) + +#define udiv_qrnnd(q, r, n1, n0, d) \ +do { \ + UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ + __d1 = __ll_highpart(d); \ + __d0 = __ll_lowpart(d); \ + \ + __r1 = (n1) % __d1; \ + __q1 = (n1) / __d1; \ + __m = (UWtype) __q1 * __d0; \ + __r1 = __r1 * __ll_B | __ll_highpart(n0); \ + if (__r1 < __m) { \ + __q1--, __r1 += (d); \ + if (__r1 >= (d)) \ + if (__r1 < __m) \ + __q1--, __r1 += (d); \ + } \ + __r1 -= __m; \ + __r0 = __r1 % __d1; \ + __q0 = __r1 / __d1; \ + __m = (UWtype) __q0 * __d0; \ + __r0 = __r0 * __ll_B | __ll_lowpart(n0); \ + if (__r0 < __m) { \ + __q0--, __r0 += (d); \ + if (__r0 >= (d)) \ + if (__r0 < __m) \ + __q0--, __r0 += (d); \ + } \ + __r0 -= __m; \ + (q) = (UWtype) __q1 * __ll_B | __q0; \ + (r) = __r0; \ +} while (0) diff --git a/arch/nds32/include/asm/shmparam.h b/arch/nds32/include/asm/shmparam.h new file mode 100644 index 000000000..3aeee9469 --- /dev/null +++ b/arch/nds32/include/asm/shmparam.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMNDS32_SHMPARAM_H +#define _ASMNDS32_SHMPARAM_H + +/* + * This should be the size of the virtually indexed cache/ways, + * whichever is greater since the cache aliases every size/ways + * bytes. 
+ */ +#define SHMLBA (4 * SZ_8K) /* attach addr a multiple of this */ + +/* + * Enforce SHMLBA in shmat + */ +#define __ARCH_FORCE_SHMLBA + +#endif /* _ASMNDS32_SHMPARAM_H */ diff --git a/arch/nds32/include/asm/stacktrace.h b/arch/nds32/include/asm/stacktrace.h new file mode 100644 index 000000000..6bf7c777b --- /dev/null +++ b/arch/nds32/include/asm/stacktrace.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2008-2018 Andes Technology Corporation */ + +#ifndef __ASM_STACKTRACE_H +#define __ASM_STACKTRACE_H + +/* Kernel callchain */ +struct stackframe { + unsigned long fp; + unsigned long sp; + unsigned long lp; +}; + +/* + * struct frame_tail: user callchain + * IMPORTANT: + * This struct is used for call-stack walking; + * the order and types of its members matter. + * Do not use an array here: it would only store sizeof(pointer). + * + * For details, see arch/arm/kernel/perf_event.c. + */ +struct frame_tail { + unsigned long stack_fp; + unsigned long stack_lp; +}; + +/* For user callchains compiled with optimize-for-size */ +struct frame_tail_opt_size { + unsigned long stack_r6; + unsigned long stack_fp; + unsigned long stack_gp; + unsigned long stack_lp; +}; + +extern void +get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph); + +#endif /* __ASM_STACKTRACE_H */ diff --git a/arch/nds32/include/asm/string.h b/arch/nds32/include/asm/string.h new file mode 100644 index 000000000..cae8fe16d --- /dev/null +++ b/arch/nds32/include/asm/string.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_STRING_H +#define __ASM_NDS32_STRING_H + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +extern void *memzero(void *ptr, __kernel_size_t n); +#endif diff --git a/arch/nds32/include/asm/suspend.h b/arch/nds32/include/asm/suspend.h new file mode 100644 index 000000000..6ed2418af --- /dev/null +++ b/arch/nds32/include/asm/suspend.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2008-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_SUSPEND_H +#define __ASM_NDS32_SUSPEND_H + +extern void suspend2ram(void); +extern void cpu_resume(void); +extern unsigned long wake_mask; + +#endif diff --git a/arch/nds32/include/asm/swab.h b/arch/nds32/include/asm/swab.h new file mode 100644 index 000000000..362a466f2 --- /dev/null +++ b/arch/nds32/include/asm/swab.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_SWAB_H__ +#define __NDS32_SWAB_H__ + +#include <linux/types.h> +#include <linux/compiler.h> + +static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +{ + __asm__("wsbh %0, %0\n\t" /* word swap byte within halfword */ + "rotri %0, %0, #16\n" + :"=r"(x) + :"0"(x)); + return x; +} + +static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) +{ + __asm__("wsbh %0, %0\n" /* word swap byte within halfword */ + :"=r"(x) + :"0"(x)); + return x; +} + +#define __arch_swab32(x) ___arch__swab32(x) +#define __arch_swab16(x) ___arch__swab16(x) + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +#define __BYTEORDER_HAS_U64__ +#define __SWAB_64_THRU_32__ +#endif + +#endif /* __NDS32_SWAB_H__ */
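To see why the wsbh-plus-rotri pair in ___arch__swab32() above amounts to a full byte swap, trace one value through plain-C equivalents of the two instructions (a standalone userspace sketch):

/* demo_swab.c - C equivalents of wsbh (swap bytes within each halfword)
 * followed by rotri #16 (rotate right by 16 bits). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 0x12345678;
	/* wsbh: 0x12345678 -> 0x34127856 */
	uint32_t h = ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
	/* rotri #16: 0x34127856 -> 0x78563412, the fully swapped result */
	uint32_t r = (h >> 16) | (h << 16);

	printf("%08x -> %08x -> %08x\n", x, h, r);
	return 0;
}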
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h new file mode 100644 index 000000000..7b5180d78 --- /dev/null +++ b/arch/nds32/include/asm/syscall.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASM_NDS32_SYSCALL_H +#define _ASM_NDS32_SYSCALL_H 1 + +#include <uapi/linux/audit.h> +#include <linux/err.h> +struct task_struct; +struct pt_regs; + +/** + * syscall_get_nr - find what system call a task is executing + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * If @task is executing a system call or is at system call + * tracing about to attempt one, returns the system call number. + * If @task is not executing a system call, i.e. it's blocked + * inside the kernel for a fault or signal, returns -1. + * + * Note this returns int even on 64-bit machines. Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * + * It's only valid to call this when @task is known to be blocked. + */ +static inline int +syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs->syscallno; +} + +/** + * syscall_rollback - roll back registers after an aborted system call + * @task: task of interest, must be in system call exit tracing + * @regs: task_pt_regs() of @task + * + * It's only valid to call this when @task is stopped for system + * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), + * after tracehook_report_syscall_entry() returned nonzero to prevent + * the system call from taking place. + * + * This rolls back the register state in @regs so it's as if the + * system call instruction was a no-op. The registers containing + * the system call number and arguments are as they were before the + * system call instruction. This may not be the same as what the + * register state looked like at system call entry tracing. + */ +static inline void +syscall_rollback(struct task_struct *task, struct pt_regs *regs) +{ + regs->uregs[0] = regs->orig_r0; +} + +/** + * syscall_get_error - check result of traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + unsigned long error = regs->uregs[0]; + return IS_ERR_VALUE(error) ? error : 0; +} + +/** + * syscall_get_return_value - get the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns the return value of the successful system call. + * This value is meaningless if syscall_get_error() returned nonzero. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 
+ */ +static inline long +syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) +{ + return regs->uregs[0]; +} + +/** + * syscall_set_return_value - change the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @error: negative error code, or zero to indicate success + * @val: user return value if @error is zero + * + * This changes the results of the system call that user mode will see. + * If @error is zero, the user sees a successful system call with a + * return value of @val. If @error is nonzero, it's a negated errno + * code; the user sees a failed system call with this errno code. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +static inline void +syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val) +{ + regs->uregs[0] = (long)error ? error : val; +} + +/** + * syscall_get_arguments - extract system call parameter values + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @args: array filled with argument values + * + * Fetches 6 arguments to the system call (from 0 through 5). The first + * argument is stored in @args[0], and so on. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +#define SYSCALL_MAX_ARGS 6 +static inline void +syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_r0; + args++; + memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0])); +} + +/** + * syscall_set_arguments - change system call parameter values + * @task: task of interest, must be in system call entry tracing + * @regs: task_pt_regs() of @task + * @args: array of argument values to store + * + * Changes 6 arguments to the system call. The first argument gets value + * @args[0], and so on. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +static inline void +syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + const unsigned long *args) +{ + regs->orig_r0 = args[0]; + args++; + + memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0])); +} + +static inline int +syscall_get_arch(struct task_struct *task) +{ + return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) + ? AUDIT_ARCH_NDS32BE : AUDIT_ARCH_NDS32; +} + +#endif /* _ASM_NDS32_SYSCALL_H */
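A sketch of how a tracing hook might consume the accessors above; note that argument 0 comes from orig_r0 while the remaining five live in uregs[1]..uregs[5], exactly as syscall_get_arguments() lays them out. The function below is a hypothetical caller, not part of this patch:

/* Hypothetical syscall-entry trace hook; assumes <asm/syscall.h> above. */
static void demo_trace_entry(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[SYSCALL_MAX_ARGS];
	int nr = syscall_get_nr(task, regs);

	if (nr == NO_SYSCALL)
		return;		/* blocked in the kernel, but not in a syscall */

	syscall_get_arguments(task, regs, args);
	pr_info("syscall %d(0x%lx, 0x%lx, ...)\n", nr, args[0], args[1]);
}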
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h new file mode 100644 index 000000000..4e7216082 --- /dev/null +++ b/arch/nds32/include/asm/syscalls.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_SYSCALLS_H +#define __ASM_NDS32_SYSCALLS_H + +asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op); +asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len); +asmlinkage long sys_rt_sigreturn_wrapper(void); +asmlinkage long sys_fp_udfiex_crtl(int cmd, int act); + +#include <asm-generic/syscalls.h> + +#endif /* __ASM_NDS32_SYSCALLS_H */ diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h new file mode 100644 index 000000000..d3967ad18 --- /dev/null +++ b/arch/nds32/include/asm/thread_info.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_THREAD_INFO_H +#define __ASM_NDS32_THREAD_INFO_H + +#ifdef __KERNEL__ + +#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + +#ifndef __ASSEMBLY__ + +struct task_struct; + +#include <asm/ptrace.h> +#include <asm/types.h> + +typedef unsigned long mm_segment_t; + +/* + * low level task data that entry.S needs immediate access to. + * __switch_to() assumes cpu_context follows immediately after cpu_domain. + */ +struct thread_info { + unsigned long flags; /* low level flags */ + __s32 preempt_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ +}; +#define INIT_THREAD_INFO(tsk) \ +{ \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +} +#define thread_saved_pc(tsk) ((unsigned long)(tsk->thread.cpu_context.pc)) +#define thread_saved_fp(tsk) ((unsigned long)(tsk->thread.cpu_context.fp)) +#endif + +/* + * thread information flags: + * TIF_SYSCALL_TRACE - syscall trace active + * TIF_SIGPENDING - signal pending + * TIF_NEED_RESCHED - rescheduling necessary + * TIF_NOTIFY_RESUME - callback before returning to user + * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED + */ +#define TIF_SIGPENDING 1 +#define TIF_NEED_RESCHED 2 +#define TIF_SINGLESTEP 3 +#define TIF_NOTIFY_RESUME 4 /* callback before returning to user */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ +#define TIF_SYSCALL_TRACE 8 +#define TIF_POLLING_NRFLAG 17 +#define TIF_MEMDIE 18 +#define TIF_FREEZE 19 +#define TIF_RESTORE_SIGMASK 20 + +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_FREEZE (1 << TIF_FREEZE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) + +/* + * Change these and you break ASM code in entry-common.S + */ +#define _TIF_WORK_MASK 0x000000ff +#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) +#define _TIF_WORK_SYSCALL_LEAVE (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) + +#endif /* __KERNEL__ */ +#endif /* __ASM_NDS32_THREAD_INFO_H */ diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h new file mode 100644 index 
000000000..672603804 --- /dev/null +++ b/arch/nds32/include/asm/tlb.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASMNDS32_TLB_H +#define __ASMNDS32_TLB_H + +#include <asm-generic/tlb.h> + +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) + +#endif diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h new file mode 100644 index 000000000..97155366e --- /dev/null +++ b/arch/nds32/include/asm/tlbflush.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMNDS32_TLBFLUSH_H +#define _ASMNDS32_TLBFLUSH_H + +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <nds32_intrinsic.h> + +static inline void local_flush_tlb_all(void) +{ + __nds32__tlbop_flua(); + __nds32__isb(); +} + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + __nds32__tlbop_flua(); + __nds32__isb(); +} + +static inline void local_flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + while (start < end) { + __nds32__tlbop_inv(start); + __nds32__isb(); + start += PAGE_SIZE; + } +} + +void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); + +#define flush_tlb_all local_flush_tlb_all +#define flush_tlb_mm local_flush_tlb_mm +#define flush_tlb_range local_flush_tlb_range +#define flush_tlb_page local_flush_tlb_page +#define flush_tlb_kernel_range local_flush_tlb_kernel_range + +void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t * pte); + +#endif diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h new file mode 100644 index 000000000..54500e81e --- /dev/null +++ b/arch/nds32/include/asm/uaccess.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMANDES_UACCESS_H +#define _ASMANDES_UACCESS_H + +/* + * User space memory access functions + */ +#include <linux/sched.h> +#include <asm/errno.h> +#include <asm/memory.h> +#include <asm/types.h> + +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry { + unsigned long insn, fixup; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#define KERNEL_DS ((mm_segment_t) { ~0UL }) +#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) + +#define get_fs() (current_thread_info()->addr_limit) +#define user_addr_max get_fs + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; +} + +#define uaccess_kernel() (get_fs() == KERNEL_DS) + +#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) + +#define access_ok(addr, size) \ + __range_ok((unsigned long)addr, (unsigned long)size) +/* + * Single-value transfer routines. 
They automatically use the right + * size if we just have the right pointer type. Note that the functions + * which read from user space (*get_*) need to take care not to leak + * kernel data even if the calling code is buggy and fails to check + * the return value. This means zeroing out the destination variable + * or buffer on error. Normally this is done out of line by the + * fixup code, but there are a few places where it intrudes on the + * main code path. When we only write to user space, there is no + * problem. + * + * The "__xxx" versions of the user access functions do not verify the + * address space - it must have been done previously with a separate + * "access_ok()" call. + * + * The "xxx_error" versions set the third argument to EFAULT if an + * error occurs, and leave it unchanged on success. Note that these + * versions are void (ie, don't return a value as such). + */ + +#define get_user(x, ptr) \ +({ \ + long __gu_err = 0; \ + __get_user_check((x), (ptr), __gu_err); \ + __gu_err; \ +}) + +#define __get_user_error(x, ptr, err) \ +({ \ + __get_user_check((x), (ptr), (err)); \ + (void)0; \ +}) + +#define __get_user(x, ptr) \ +({ \ + long __gu_err = 0; \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + __get_user_err((x), __p, (__gu_err)); \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, err) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(__p, sizeof(*__p))) { \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ +}) + +#define __get_user_err(x, ptr, err) \ +do { \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_asm("lbi", __gu_val, (ptr), (err)); \ + break; \ + case 2: \ + __get_user_asm("lhi", __gu_val, (ptr), (err)); \ + break; \ + case 4: \ + __get_user_asm("lwi", __gu_val, (ptr), (err)); \ + break; \ + case 8: \ + __get_user_asm_dword(__gu_val, (ptr), (err)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ + } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ +} while (0) + +#define __get_user_asm(inst, x, addr, err) \ + __asm__ __volatile__ ( \ + "1: "inst" %1,[%2]\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: move %0, %3\n" \ + " move %1, #0\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err), "=&r" (x) \ + : "r" (addr), "i" (-EFAULT) \ + : "cc") + +#ifdef __NDS32_EB__ +#define __gu_reg_oper0 "%H1" +#define __gu_reg_oper1 "%L1" +#else +#define __gu_reg_oper0 "%L1" +#define __gu_reg_oper1 "%H1" +#endif + +#define __get_user_asm_dword(x, addr, err) \ + __asm__ __volatile__ ( \ + "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ + "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: move %0, %3\n" \ + " b 3b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .previous" \ + : "+r"(err), "=&r"(x) \ + : "r"(addr), "i"(-EFAULT) \ + : "cc") + +#define put_user(x, ptr) \ +({ \ + long __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ +}) + +#define __put_user(x, ptr) \ +({ \ + long __pu_err = 0; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + __put_user_err((x), __p, __pu_err); \ + __pu_err; \ +}) + +#define __put_user_error(x, ptr, err) \ +({ \ + __put_user_err((x), (ptr), (err)); \ + (void)0; \ +}) + +#define __put_user_check(x, ptr, err) \ +({ \ + 
__typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(__p, sizeof(*__p))) { \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ +}) + +#define __put_user_err(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_asm("sbi", __pu_val, (ptr), (err)); \ + break; \ + case 2: \ + __put_user_asm("shi", __pu_val, (ptr), (err)); \ + break; \ + case 4: \ + __put_user_asm("swi", __pu_val, (ptr), (err)); \ + break; \ + case 8: \ + __put_user_asm_dword(__pu_val, (ptr), (err)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ + } \ +} while (0) + +#define __put_user_asm(inst, x, addr, err) \ + __asm__ __volatile__ ( \ + "1: "inst" %1,[%2]\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: move %0, %3\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err) \ + : "r" (x), "r" (addr), "i" (-EFAULT) \ + : "cc") + +#ifdef __NDS32_EB__ +#define __pu_reg_oper0 "%H2" +#define __pu_reg_oper1 "%L2" +#else +#define __pu_reg_oper0 "%L2" +#define __pu_reg_oper1 "%H2" +#endif + +#define __put_user_asm_dword(x, addr, err) \ + __asm__ __volatile__ ( \ + "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ + "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: move %0, %3\n" \ + " b 3b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .previous" \ + : "+r"(err) \ + : "r"(addr), "r"(x), "i"(-EFAULT) \ + : "cc") + +extern unsigned long __arch_clear_user(void __user *addr, unsigned long n); +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); +extern unsigned long __arch_copy_from_user(void *to, const void __user *from, + unsigned long n); +extern unsigned long __arch_copy_to_user(void __user *to, const void *from, + unsigned long n); + +#define raw_copy_from_user __arch_copy_from_user +#define raw_copy_to_user __arch_copy_to_user + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER +static inline unsigned long clear_user(void __user *to, unsigned long n) +{ + if (access_ok(to, n)) + n = __arch_clear_user(to, n); + return n; +} + +static inline unsigned long __clear_user(void __user *to, unsigned long n) +{ + return __arch_clear_user(to, n); +} + +#endif /* _ASMNDS32_UACCESS_H */
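For reference, a typical caller of the accessors defined above; the request structure and field are invented for illustration:

/* Hypothetical copy-in helper; get_user() performs the access_ok() check
 * itself and zeroes the destination variable on fault, per the comments
 * above. */
struct demo_req {
	u32 flags;
};

static long demo_read_flags(struct demo_req __user *ureq, u32 *out)
{
	u32 flags;

	if (get_user(flags, &ureq->flags))
		return -EFAULT;

	*out = flags;
	return 0;
}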
diff --git a/arch/nds32/include/asm/unistd.h b/arch/nds32/include/asm/unistd.h new file mode 100644 index 000000000..bf5e2d440 --- /dev/null +++ b/arch/nds32/include/asm/unistd.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#define __ARCH_WANT_SYS_CLONE + +#include <uapi/asm/unistd.h> diff --git a/arch/nds32/include/asm/vdso.h b/arch/nds32/include/asm/vdso.h new file mode 100644 index 000000000..89b113ffc --- /dev/null +++ b/arch/nds32/include/asm/vdso.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005-2017 Andes Technology Corporation + */ + +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include <generated/vdso-offsets.h> + +#define VDSO_SYMBOL(base, name) \ +({ \ + (unsigned long)(vdso_offset_##name + (unsigned long)(base)); \ +}) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_VDSO_H */ diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h new file mode 100644 index 000000000..74c688020 --- /dev/null +++ b/arch/nds32/include/asm/vdso_datapage.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2012 ARM Limited +// Copyright (C) 2005-2017 Andes Technology Corporation +#ifndef __ASM_VDSO_DATAPAGE_H +#define __ASM_VDSO_DATAPAGE_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +struct vdso_data { + bool cycle_count_down; /* timer cycle counter decreases over time */ + u32 cycle_count_offset; /* offset of timer cycle counter register */ + u32 seq_count; /* sequence count - odd during updates */ + u32 xtime_coarse_sec; /* coarse time */ + u32 xtime_coarse_nsec; + + u32 wtm_clock_sec; /* wall to monotonic offset */ + u32 wtm_clock_nsec; + u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */ + u32 cs_mult; /* clocksource multiplier */ + u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */ + u32 hrtimer_res; /* hrtimer resolution */ + + u64 cs_cycle_last; /* last cycle value */ + u64 cs_mask; /* clocksource mask */ + + u64 xtime_clock_nsec; /* CLOCK_REALTIME sub-ns base */ + u32 tz_minuteswest; /* timezone info for gettimeofday(2) */ + u32 tz_dsttime; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_VDSO_DATAPAGE_H */ diff --git a/arch/nds32/include/asm/vdso_timer_info.h b/arch/nds32/include/asm/vdso_timer_info.h new file mode 100644 index 000000000..328439ce3 --- /dev/null +++ b/arch/nds32/include/asm/vdso_timer_info.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +extern struct timer_info_t timer_info; +#define EMPTY_VALUE ~(0UL) +#define EMPTY_TIMER_MAPPING EMPTY_VALUE +#define EMPTY_REG_OFFSET EMPTY_VALUE + +struct timer_info_t +{ + bool cycle_count_down; + unsigned long mapping_base; + unsigned long cycle_count_reg_offset; +}; diff --git a/arch/nds32/include/asm/vermagic.h b/arch/nds32/include/asm/vermagic.h new file mode 100644 index 000000000..f772e7ba3 --- /dev/null +++ b/arch/nds32/include/asm/vermagic.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASM_VERMAGIC_H +#define _ASM_VERMAGIC_H + +#define MODULE_ARCH_VERMAGIC "NDS32v3" + +#endif /* _ASM_VERMAGIC_H */ diff --git a/arch/nds32/include/asm/vmalloc.h b/arch/nds32/include/asm/vmalloc.h new file mode 100644 index 000000000..caeed3898 --- /dev/null +++ b/arch/nds32/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_NDS32_VMALLOC_H +#define _ASM_NDS32_VMALLOC_H + +#endif /* _ASM_NDS32_VMALLOC_H */ diff --git a/arch/nds32/include/uapi/asm/Kbuild b/arch/nds32/include/uapi/asm/Kbuild new file mode 100644 index 000000000..e78470141 --- /dev/null +++ b/arch/nds32/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += ucontext.h diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h new file mode 100644 index 000000000..bc0b92ab8 --- /dev/null +++ b/arch/nds32/include/uapi/asm/auxvec.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_AUXVEC_H +#define __ASM_AUXVEC_H + +/* + * This entry gives some information about the FPU initialization + * performed by the kernel. + */ +#define AT_FPUCW 18 /* Used FPU control word. */
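The seq_count field in struct vdso_data above implements the usual vDSO seqlock: the kernel bumps it before and after updating the page, so it is odd while an update is in flight. A reader sketch (hypothetical and simplified; real vDSO code also adds the memory barriers the architecture requires):

/* Hypothetical vdso_data reader loop; assumes a mapped struct vdso_data *vd. */
static inline u32 demo_read_coarse_sec(const struct vdso_data *vd)
{
	u32 seq, sec;

	do {
		/* wait for an even (stable) sequence count */
		do {
			seq = READ_ONCE(vd->seq_count);
		} while (seq & 1);

		sec = READ_ONCE(vd->xtime_coarse_sec);
		/* retry if the kernel updated the page meanwhile */
	} while (READ_ONCE(vd->seq_count) != seq);

	return sec;
}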
+ + +/* VDSO location */ +#define AT_SYSINFO_EHDR 33 + +#define AT_VECTOR_SIZE_ARCH 1 + +#endif diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..c264ef12c --- /dev/null +++ b/arch/nds32/include/uapi/asm/byteorder.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_BYTEORDER_H__ +#define __NDS32_BYTEORDER_H__ + +#ifdef __NDS32_EB__ +#include <linux/byteorder/big_endian.h> +#else +#include <linux/byteorder/little_endian.h> +#endif + +#endif /* __NDS32_BYTEORDER_H__ */ diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h new file mode 100644 index 000000000..31b9b439d --- /dev/null +++ b/arch/nds32/include/uapi/asm/cachectl.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 1994, 1995, 1996 by Ralf Baechle +// Copyright (C) 2005-2017 Andes Technology Corporation +#ifndef _ASM_CACHECTL +#define _ASM_CACHECTL + +/* + * Options for cacheflush system call + */ +#define ICACHE 0 /* flush instruction cache */ +#define DCACHE 1 /* writeback and flush data cache */ +#define BCACHE 2 /* flush instruction cache + writeback and flush data cache */ + +#endif /* _ASM_CACHECTL */ diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h new file mode 100644 index 000000000..f17396db1 --- /dev/null +++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2005-2019 Andes Technology Corporation */ +#ifndef _FP_UDF_IEX_CRTL_H +#define _FP_UDF_IEX_CRTL_H + +/* + * The cmd list of sys_fp_udfiex_crtl() + */ +/* Disable UDF or IEX trap based on the content of parameter act */ +#define DISABLE_UDF_IEX_TRAP 0 +/* Enable UDF or IEX trap based on the content of parameter act */ +#define ENABLE_UDF_IEX_TRAP 1 +/* Get current status of UDF and IEX trap */ +#define GET_UDF_IEX_TRAP 2 + +#endif /* _FP_UDF_IEX_CRTL_H */ diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h new file mode 100644 index 000000000..48d00328d --- /dev/null +++ b/arch/nds32/include/uapi/asm/param.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __ASM_NDS32_PARAM_H +#define __ASM_NDS32_PARAM_H + +#define EXEC_PAGESIZE 8192 + +#include <asm-generic/param.h> + +#endif /* __ASM_NDS32_PARAM_H */ diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..d76217c7c --- /dev/null +++ b/arch/nds32/include/uapi/asm/ptrace.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __UAPI_ASM_NDS32_PTRACE_H +#define __UAPI_ASM_NDS32_PTRACE_H + +#ifndef __ASSEMBLY__ + +/* + * User structures for general purpose registers. 
+ */ +struct user_pt_regs { + long uregs[26]; + long fp; + long gp; + long lp; + long sp; + long ipc; + long lb; + long le; + long lc; + long syscallno; +}; +#endif +#endif diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..6c1e66488 --- /dev/null +++ b/arch/nds32/include/uapi/asm/sigcontext.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef _ASMNDS32_SIGCONTEXT_H +#define _ASMNDS32_SIGCONTEXT_H + +/* + * Signal context structure - contains all info to do with the state + * before the signal handler was invoked. Note: only add new entries + * to the end of the structure. + */ +struct fpu_struct { + unsigned long long fd_regs[32]; + unsigned long fpcsr; + /* + * When CONFIG_SUPPORT_DENORMAL_ARITHMETIC is defined, the kernel + * prevents hardware from treating a denormalized output as an + * underflow case and rounding it to a normal number. To do so, the + * kernel enables the UDF and IEX traps in the fpcsr register so it + * can step into the calculation. As a side effect, the UDF and IEX + * trap enable bits in $fpcsr lose their original purpose. + * + * UDF_IEX_trap therefore takes over the role of the UDF and IEX + * trap enable bits in $fpcsr for controlling the underflow and + * inexact traps. Its bit field layout matches $fpcsr: bit 10 + * enables UDF exception trapping and bit 11 enables IEX exception + * trapping. + * + * UDF_IEX_trap is only modified through the fp_udfiex_crtl syscall, + * so it does not need to be saved and restored on each context + * switch. + */ + unsigned long UDF_IEX_trap; +}; + +struct zol_struct { + unsigned long nds32_lc; /* $LC */ + unsigned long nds32_le; /* $LE */ + unsigned long nds32_lb; /* $LB */ +}; + +struct sigcontext { + unsigned long trap_no; + unsigned long error_code; + unsigned long oldmask; + unsigned long nds32_r0; + unsigned long nds32_r1; + unsigned long nds32_r2; + unsigned long nds32_r3; + unsigned long nds32_r4; + unsigned long nds32_r5; + unsigned long nds32_r6; + unsigned long nds32_r7; + unsigned long nds32_r8; + unsigned long nds32_r9; + unsigned long nds32_r10; + unsigned long nds32_r11; + unsigned long nds32_r12; + unsigned long nds32_r13; + unsigned long nds32_r14; + unsigned long nds32_r15; + unsigned long nds32_r16; + unsigned long nds32_r17; + unsigned long nds32_r18; + unsigned long nds32_r19; + unsigned long nds32_r20; + unsigned long nds32_r21; + unsigned long nds32_r22; + unsigned long nds32_r23; + unsigned long nds32_r24; + unsigned long nds32_r25; + unsigned long nds32_fp; /* $r28 */ + unsigned long nds32_gp; /* $r29 */ + unsigned long nds32_lp; /* $r30 */ + unsigned long nds32_sp; /* $r31 */ + unsigned long nds32_ipc; + unsigned long fault_address; + unsigned long used_math_flag; + /* FPU Registers */ + struct fpu_struct fpu; + struct zol_struct zol; +}; + +#endif diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h new file mode 100644 index 000000000..410795e28 --- /dev/null +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYNC_FILE_RANGE2 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS + +/* Use the standard ABI for syscalls */ +#include <asm-generic/unistd.h> + +/* Additional NDS32 specific syscalls. 
*/ +#define __NR_cacheflush (__NR_arch_specific_syscall) +#define __NR_fp_udfiex_crtl (__NR_arch_specific_syscall + 1) +__SYSCALL(__NR_cacheflush, sys_cacheflush) +__SYSCALL(__NR_fp_udfiex_crtl, sys_fp_udfiex_crtl)
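Putting the uapi pieces together: userspace reaches sys_cacheflush() through the arch-specific syscall number above, with the op argument taken from <asm/cachectl.h> earlier in this patch. A minimal sketch, assuming an nds32 toolchain that exposes these headers:

/* demo_flush.c - hypothetical userspace caller of the nds32 cacheflush
 * syscall; BCACHE writes back the D-cache and flushes the I-cache. */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>     /* __NR_cacheflush */
#include <asm/cachectl.h>   /* ICACHE / DCACHE / BCACHE */

static int flush_code_range(void *addr, unsigned long len)
{
	return syscall(__NR_cacheflush, addr, len, BCACHE);
}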