From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- arch/arc/lib/Makefile | 15 +++ arch/arc/lib/memcmp.S | 149 +++++++++++++++++++++++ arch/arc/lib/memcpy-700.S | 63 ++++++++++ arch/arc/lib/memcpy-archs-unaligned.S | 47 ++++++++ arch/arc/lib/memcpy-archs.S | 219 ++++++++++++++++++++++++++++++++++ arch/arc/lib/memset-archs.S | 143 ++++++++++++++++++++++ arch/arc/lib/memset.S | 56 +++++++++ arch/arc/lib/strchr-700.S | 130 ++++++++++++++++++++ arch/arc/lib/strcmp-archs.S | 75 ++++++++++++ arch/arc/lib/strcmp.S | 93 +++++++++++++++ arch/arc/lib/strcpy-700.S | 67 +++++++++++ arch/arc/lib/strlen.S | 80 +++++++++++++ 12 files changed, 1137 insertions(+) create mode 100644 arch/arc/lib/Makefile create mode 100644 arch/arc/lib/memcmp.S create mode 100644 arch/arc/lib/memcpy-700.S create mode 100644 arch/arc/lib/memcpy-archs-unaligned.S create mode 100644 arch/arc/lib/memcpy-archs.S create mode 100644 arch/arc/lib/memset-archs.S create mode 100644 arch/arc/lib/memset.S create mode 100644 arch/arc/lib/strchr-700.S create mode 100644 arch/arc/lib/strcmp-archs.S create mode 100644 arch/arc/lib/strcmp.S create mode 100644 arch/arc/lib/strcpy-700.S create mode 100644 arch/arc/lib/strlen.S (limited to 'arch/arc/lib') diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile new file mode 100644 index 000000000..30158ae69 --- /dev/null +++ b/arch/arc/lib/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) +# + +lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o + +lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o +lib-$(CONFIG_ISA_ARCV2) += memset-archs.o strcmp-archs.o + +ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS +lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs-unaligned.o +else +lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs.o +endif diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S new file mode 100644 index 000000000..d6dc5e9bc --- /dev/null +++ b/arch/arc/lib/memcmp.S @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +#include + +#ifdef __LITTLE_ENDIAN__ +#define WORD2 r2 +#define SHIFT r3 +#else /* BIG ENDIAN */ +#define WORD2 r3 +#define SHIFT r2 +#endif + +ENTRY_CFI(memcmp) + or r12,r0,r1 + asl_s r12,r12,30 + sub r3,r2,1 + brls r2,r12,.Lbytewise + ld r4,[r0,0] + ld r5,[r1,0] + lsr.f lp_count,r3,3 +#ifdef CONFIG_ISA_ARCV2 + /* In ARCv2 a branch can't be the last instruction in a zero overhead + * loop. + * So we move the branch to the start of the loop, duplicate it + * after the end, and set up r12 so that the branch isn't taken + * initially. 
+ */ + mov_s r12,WORD2 + lpne .Loop_end + brne WORD2,r12,.Lodd + ld WORD2,[r0,4] +#else + lpne .Loop_end + ld_s WORD2,[r0,4] +#endif + ld_s r12,[r1,4] + brne r4,r5,.Leven + ld.a r4,[r0,8] + ld.a r5,[r1,8] +#ifdef CONFIG_ISA_ARCV2 +.Loop_end: + brne WORD2,r12,.Lodd +#else + brne WORD2,r12,.Lodd +.Loop_end: +#endif + asl_s SHIFT,SHIFT,3 + bhs_s .Last_cmp + brne r4,r5,.Leven + ld r4,[r0,4] + ld r5,[r1,4] +#ifdef __LITTLE_ENDIAN__ + nop_s + ; one more load latency cycle +.Last_cmp: + xor r0,r4,r5 + bset r0,r0,SHIFT + sub_s r1,r0,1 + bic_s r1,r1,r0 + norm r1,r1 + b.d .Leven_cmp + and r1,r1,24 +.Leven: + xor r0,r4,r5 + sub_s r1,r0,1 + bic_s r1,r1,r0 + norm r1,r1 + ; slow track insn + and r1,r1,24 +.Leven_cmp: + asl r2,r4,r1 + asl r12,r5,r1 + lsr_s r2,r2,1 + lsr_s r12,r12,1 + j_s.d [blink] + sub r0,r2,r12 + .balign 4 +.Lodd: + xor r0,WORD2,r12 + sub_s r1,r0,1 + bic_s r1,r1,r0 + norm r1,r1 + ; slow track insn + and r1,r1,24 + asl_s r2,r2,r1 + asl_s r12,r12,r1 + lsr_s r2,r2,1 + lsr_s r12,r12,1 + j_s.d [blink] + sub r0,r2,r12 +#else /* BIG ENDIAN */ +.Last_cmp: + neg_s SHIFT,SHIFT + lsr r4,r4,SHIFT + lsr r5,r5,SHIFT + ; slow track insn +.Leven: + sub.f r0,r4,r5 + mov.ne r0,1 + j_s.d [blink] + bset.cs r0,r0,31 +.Lodd: + cmp_s WORD2,r12 + mov_s r0,1 + j_s.d [blink] + bset.cs r0,r0,31 +#endif /* ENDIAN */ + .balign 4 +.Lbytewise: + breq r2,0,.Lnil + ldb r4,[r0,0] + ldb r5,[r1,0] + lsr.f lp_count,r3 +#ifdef CONFIG_ISA_ARCV2 + mov r12,r3 + lpne .Lbyte_end + brne r3,r12,.Lbyte_odd +#else + lpne .Lbyte_end +#endif + ldb_s r3,[r0,1] + ldb r12,[r1,1] + brne r4,r5,.Lbyte_even + ldb.a r4,[r0,2] + ldb.a r5,[r1,2] +#ifdef CONFIG_ISA_ARCV2 +.Lbyte_end: + brne r3,r12,.Lbyte_odd +#else + brne r3,r12,.Lbyte_odd +.Lbyte_end: +#endif + bcc .Lbyte_even + brne r4,r5,.Lbyte_even + ldb_s r3,[r0,1] + ldb_s r12,[r1,1] +.Lbyte_odd: + j_s.d [blink] + sub r0,r3,r12 +.Lbyte_even: + j_s.d [blink] + sub r0,r4,r5 +.Lnil: + j_s.d [blink] + mov r0,0 +END_CFI(memcmp) diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S new file mode 100644 index 000000000..f2e239e21 --- /dev/null +++ b/arch/arc/lib/memcpy-700.S @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. 
(www.synopsys.com) + */ + +#include + +ENTRY_CFI(memcpy) + or r3,r0,r1 + asl_s r3,r3,30 + mov_s r5,r0 + brls.d r2,r3,.Lcopy_bytewise + sub.f r3,r2,1 + ld_s r12,[r1,0] + asr.f lp_count,r3,3 + bbit0.d r3,2,.Lnox4 + bmsk_s r2,r2,1 + st.ab r12,[r5,4] + ld.a r12,[r1,4] +.Lnox4: + lppnz .Lendloop + ld_s r3,[r1,4] + st.ab r12,[r5,4] + ld.a r12,[r1,8] + st.ab r3,[r5,4] +.Lendloop: + breq r2,0,.Last_store + ld r3,[r5,0] +#ifdef __LITTLE_ENDIAN__ + add3 r2,-1,r2 + ; uses long immediate + xor_s r12,r12,r3 + bmsk r12,r12,r2 + xor_s r12,r12,r3 +#else /* BIG ENDIAN */ + sub3 r2,31,r2 + ; uses long immediate + xor_s r3,r3,r12 + bmsk r3,r3,r2 + xor_s r12,r12,r3 +#endif /* ENDIAN */ +.Last_store: + j_s.d [blink] + st r12,[r5,0] + + .balign 4 +.Lcopy_bytewise: + jcs [blink] + ldb_s r12,[r1,0] + lsr.f lp_count,r3 + bhs_s .Lnox1 + stb.ab r12,[r5,1] + ldb.a r12,[r1,1] +.Lnox1: + lppnz .Lendbloop + ldb_s r3,[r1,1] + stb.ab r12,[r5,1] + ldb.a r12,[r1,2] + stb.ab r3,[r5,1] +.Lendbloop: + j_s.d [blink] + stb r12,[r5,0] +END_CFI(memcpy) diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S new file mode 100644 index 000000000..28993a73f --- /dev/null +++ b/arch/arc/lib/memcpy-archs-unaligned.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ARCv2 memcpy implementation optimized for unaligned memory access using. + * + * Copyright (C) 2019 Synopsys + * Author: Eugeniy Paltsev + */ + +#include + +#ifdef CONFIG_ARC_HAS_LL64 +# define LOADX(DST,RX) ldd.ab DST, [RX, 8] +# define STOREX(SRC,RX) std.ab SRC, [RX, 8] +# define ZOLSHFT 5 +# define ZOLAND 0x1F +#else +# define LOADX(DST,RX) ld.ab DST, [RX, 4] +# define STOREX(SRC,RX) st.ab SRC, [RX, 4] +# define ZOLSHFT 4 +# define ZOLAND 0xF +#endif + +ENTRY_CFI(memcpy) + mov r3, r0 ; don;t clobber ret val + + lsr.f lp_count, r2, ZOLSHFT + lpnz @.Lcopy32_64bytes + ;; LOOP START + LOADX (r6, r1) + LOADX (r8, r1) + LOADX (r10, r1) + LOADX (r4, r1) + STOREX (r6, r3) + STOREX (r8, r3) + STOREX (r10, r3) + STOREX (r4, r3) +.Lcopy32_64bytes: + + and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes + lpnz @.Lcopyremainingbytes + ;; LOOP START + ldb.ab r5, [r1, 1] + stb.ab r5, [r3, 1] +.Lcopyremainingbytes: + + j [blink] +END_CFI(memcpy) diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S new file mode 100644 index 000000000..0051a84f6 --- /dev/null +++ b/arch/arc/lib/memcpy-archs.S @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014-15 Synopsys, Inc. 
(www.synopsys.com) + */ + +#include + +#ifdef __LITTLE_ENDIAN__ +# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; << +# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >> +# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM +# define MERGE_2(RX,RY,IMM) +# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF +# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM +#else +# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >> +# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; << +# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; << +# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; << +# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM +# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08 +#endif + +#ifdef CONFIG_ARC_HAS_LL64 +# define LOADX(DST,RX) ldd.ab DST, [RX, 8] +# define STOREX(SRC,RX) std.ab SRC, [RX, 8] +# define ZOLSHFT 5 +# define ZOLAND 0x1F +#else +# define LOADX(DST,RX) ld.ab DST, [RX, 4] +# define STOREX(SRC,RX) st.ab SRC, [RX, 4] +# define ZOLSHFT 4 +# define ZOLAND 0xF +#endif + +ENTRY_CFI(memcpy) + mov.f 0, r2 +;;; if size is zero + jz.d [blink] + mov r3, r0 ; don;t clobber ret val + +;;; if size <= 8 + cmp r2, 8 + bls.d @.Lsmallchunk + mov.f lp_count, r2 + + and.f r4, r0, 0x03 + rsub lp_count, r4, 4 + lpnz @.Laligndestination + ;; LOOP BEGIN + ldb.ab r5, [r1,1] + sub r2, r2, 1 + stb.ab r5, [r3,1] +.Laligndestination: + +;;; Check the alignment of the source + and.f r4, r1, 0x03 + bnz.d @.Lsourceunaligned + +;;; CASE 0: Both source and destination are 32bit aligned +;;; Convert len to Dwords, unfold x4 + lsr.f lp_count, r2, ZOLSHFT + lpnz @.Lcopy32_64bytes + ;; LOOP START + LOADX (r6, r1) + LOADX (r8, r1) + LOADX (r10, r1) + LOADX (r4, r1) + STOREX (r6, r3) + STOREX (r8, r3) + STOREX (r10, r3) + STOREX (r4, r3) +.Lcopy32_64bytes: + + and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes +.Lsmallchunk: + lpnz @.Lcopyremainingbytes + ;; LOOP START + ldb.ab r5, [r1,1] + stb.ab r5, [r3,1] +.Lcopyremainingbytes: + + j [blink] +;;; END CASE 0 + +.Lsourceunaligned: + cmp r4, 2 + beq.d @.LunalignedOffby2 + sub r2, r2, 1 + + bhi.d @.LunalignedOffby3 + ldb.ab r5, [r1, 1] + +;;; CASE 1: The source is unaligned, off by 1 + ;; Hence I need to read 1 byte for a 16bit alignment + ;; and 2bytes to reach 32bit alignment + ldh.ab r6, [r1, 2] + sub r2, r2, 2 + ;; Convert to words, unfold x2 + lsr.f lp_count, r2, 3 + MERGE_1 (r6, r6, 8) + MERGE_2 (r5, r5, 24) + or r5, r5, r6 + + ;; Both src and dst are aligned + lpnz @.Lcopy8bytes_1 + ;; LOOP START + ld.ab r6, [r1, 4] + ld.ab r8, [r1,4] + + SHIFT_1 (r7, r6, 24) + or r7, r7, r5 + SHIFT_2 (r5, r6, 8) + + SHIFT_1 (r9, r8, 24) + or r9, r9, r5 + SHIFT_2 (r5, r8, 8) + + st.ab r7, [r3, 4] + st.ab r9, [r3, 4] +.Lcopy8bytes_1: + + ;; Write back the remaining 16bits + EXTRACT_1 (r6, r5, 16) + sth.ab r6, [r3, 2] + ;; Write back the remaining 8bits + EXTRACT_2 (r5, r5, 16) + stb.ab r5, [r3, 1] + + and.f lp_count, r2, 0x07 ;Last 8bytes + lpnz @.Lcopybytewise_1 + ;; LOOP START + ldb.ab r6, [r1,1] + stb.ab r6, [r3,1] +.Lcopybytewise_1: + j [blink] + +.LunalignedOffby2: +;;; CASE 2: The source is unaligned, off by 2 + ldh.ab r5, [r1, 2] + sub r2, r2, 1 + + ;; Both src and dst are aligned + ;; Convert to words, unfold x2 + lsr.f lp_count, r2, 3 +#ifdef __BIG_ENDIAN__ + asl.nz r5, r5, 16 +#endif + lpnz @.Lcopy8bytes_2 + ;; LOOP START + ld.ab r6, [r1, 4] + ld.ab r8, [r1,4] + + SHIFT_1 (r7, r6, 16) + or r7, r7, r5 + SHIFT_2 (r5, r6, 16) + + SHIFT_1 (r9, r8, 16) + or r9, r9, r5 + SHIFT_2 (r5, r8, 16) + + st.ab r7, [r3, 4] + st.ab r9, [r3, 4] +.Lcopy8bytes_2: + +#ifdef __BIG_ENDIAN__ + lsr.nz r5, r5, 16 +#endif + sth.ab r5, 
[r3, 2] + + and.f lp_count, r2, 0x07 ;Last 8bytes + lpnz @.Lcopybytewise_2 + ;; LOOP START + ldb.ab r6, [r1,1] + stb.ab r6, [r3,1] +.Lcopybytewise_2: + j [blink] + +.LunalignedOffby3: +;;; CASE 3: The source is unaligned, off by 3 +;;; Hence, I need to read 1byte for achieve the 32bit alignment + + ;; Both src and dst are aligned + ;; Convert to words, unfold x2 + lsr.f lp_count, r2, 3 +#ifdef __BIG_ENDIAN__ + asl.ne r5, r5, 24 +#endif + lpnz @.Lcopy8bytes_3 + ;; LOOP START + ld.ab r6, [r1, 4] + ld.ab r8, [r1,4] + + SHIFT_1 (r7, r6, 8) + or r7, r7, r5 + SHIFT_2 (r5, r6, 24) + + SHIFT_1 (r9, r8, 8) + or r9, r9, r5 + SHIFT_2 (r5, r8, 24) + + st.ab r7, [r3, 4] + st.ab r9, [r3, 4] +.Lcopy8bytes_3: + +#ifdef __BIG_ENDIAN__ + lsr.nz r5, r5, 24 +#endif + stb.ab r5, [r3, 1] + + and.f lp_count, r2, 0x07 ;Last 8bytes + lpnz @.Lcopybytewise_3 + ;; LOOP START + ldb.ab r6, [r1,1] + stb.ab r6, [r3,1] +.Lcopybytewise_3: + j [blink] + +END_CFI(memcpy) diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S new file mode 100644 index 000000000..d2e09fece --- /dev/null +++ b/arch/arc/lib/memset-archs.S @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) + */ + +#include +#include + +/* + * The memset implementation below is optimized to use prefetchw and prealloc + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) + * If you want to implement optimized memset for other possible L1 data cache + * line lengths (32B and 128B) you should rewrite code carefully checking + * we don't call any prefetchw/prealloc instruction for L1 cache lines which + * don't belongs to memset area. + */ + +#if L1_CACHE_SHIFT == 6 + +.macro PREALLOC_INSTR reg, off + prealloc [\reg, \off] +.endm + +.macro PREFETCHW_INSTR reg, off + prefetchw [\reg, \off] +.endm + +#else + +.macro PREALLOC_INSTR reg, off +.endm + +.macro PREFETCHW_INSTR reg, off +.endm + +#endif + +ENTRY_CFI(memset) + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location + mov.f 0, r2 +;;; if size is zero + jz.d [blink] + mov r3, r0 ; don't clobber ret val + +;;; if length < 8 + brls.d.nt r2, 8, .Lsmallchunk + mov.f lp_count,r2 + + and.f r4, r0, 0x03 + rsub lp_count, r4, 4 + lpnz @.Laligndestination + ;; LOOP BEGIN + stb.ab r1, [r3,1] + sub r2, r2, 1 +.Laligndestination: + +;;; Destination is aligned + and r1, r1, 0xFF + asl r4, r1, 8 + or r4, r4, r1 + asl r5, r4, 16 + or r5, r5, r4 + mov r4, r5 + + sub3 lp_count, r2, 8 + cmp r2, 64 + bmsk.hi r2, r2, 5 + mov.ls lp_count, 0 + add3.hi r2, r2, 8 + +;;; Convert len to Dwords, unfold x8 + lsr.f lp_count, lp_count, 6 + + lpnz @.Lset64bytes + ;; LOOP START + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching + +#ifdef CONFIG_ARC_HAS_LL64 + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif +.Lset64bytes: + + lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes + lpnz .Lset32bytes + ;; LOOP START +#ifdef CONFIG_ARC_HAS_LL64 + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab 
r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif +.Lset32bytes: + + and.f lp_count, r2, 0x1F ;Last remaining 31 bytes +.Lsmallchunk: + lpnz .Lcopy3bytes + ;; LOOP START + stb.ab r1, [r3, 1] +.Lcopy3bytes: + + j [blink] + +END_CFI(memset) + +ENTRY_CFI(memzero) + ; adjust bzero args to memset args + mov r2, r1 + b.d memset ;tail call so need to tinker with blink + mov r1, 0 +END_CFI(memzero) diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S new file mode 100644 index 000000000..9f35960da --- /dev/null +++ b/arch/arc/lib/memset.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +#include + +#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */ + +ENTRY_CFI(memset) + mov_s r4,r0 + or r12,r0,r2 + bmsk.f r12,r12,1 + extb_s r1,r1 + asl r3,r1,8 + beq.d .Laligned + or_s r1,r1,r3 + brls r2,SMALL,.Ltiny + add r3,r2,r0 + stb r1,[r3,-1] + bclr_s r3,r3,0 + stw r1,[r3,-2] + bmsk.f r12,r0,1 + add_s r2,r2,r12 + sub.ne r2,r2,4 + stb.ab r1,[r4,1] + and r4,r4,-2 + stw.ab r1,[r4,2] + and r4,r4,-4 +.Laligned: ; This code address should be aligned for speed. + asl r3,r1,16 + lsr.f lp_count,r2,2 + or_s r1,r1,r3 + lpne .Loop_end + st.ab r1,[r4,4] +.Loop_end: + j_s [blink] + + .balign 4 +.Ltiny: + mov.f lp_count,r2 + lpne .Ltiny_end + stb.ab r1,[r4,1] +.Ltiny_end: + j_s [blink] +END_CFI(memset) + +; memzero: @r0 = mem, @r1 = size_t +; memset: @r0 = mem, @r1 = char, @r2 = size_t + +ENTRY_CFI(memzero) + ; adjust bzero args to memset args + mov r2, r1 + mov r1, 0 + b memset ;tail call so need to tinker with blink +END_CFI(memzero) diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S new file mode 100644 index 000000000..d52e2833f --- /dev/null +++ b/arch/arc/lib/strchr-700.S @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +/* ARC700 has a relatively long pipeline and branch prediction, so we want + to avoid branches that are hard to predict. On the other hand, the + presence of the norm instruction makes it easier to operate on whole + words branch-free. */ + +#include + +ENTRY_CFI(strchr) + extb_s r1,r1 + asl r5,r1,8 + bmsk r2,r0,1 + or r5,r5,r1 + mov_s r3,0x01010101 + breq.d r2,r0,.Laligned + asl r4,r5,16 + sub_s r0,r0,r2 + asl r7,r2,3 + ld_s r2,[r0] +#ifdef __LITTLE_ENDIAN__ + asl r7,r3,r7 +#else + lsr r7,r3,r7 +#endif + or r5,r5,r4 + ror r4,r3 + sub r12,r2,r7 + bic_s r12,r12,r2 + and r12,r12,r4 + brne.d r12,0,.Lfound0_ua + xor r6,r2,r5 + ld.a r2,[r0,4] + sub r12,r6,r7 + bic r12,r12,r6 +#ifdef __LITTLE_ENDIAN__ + and r7,r12,r4 + breq r7,0,.Loop ; For speed, we want this branch to be unaligned. + b .Lfound_char ; Likewise this one. +#else + and r12,r12,r4 + breq r12,0,.Loop ; For speed, we want this branch to be unaligned. + lsr_s r12,r12,7 + bic r2,r7,r6 + b.d .Lfound_char_b + and_s r2,r2,r12 +#endif +; /* We require this code address to be unaligned for speed... */ +.Laligned: + ld_s r2,[r0] + or r5,r5,r4 + ror r4,r3 +; /* ... so that this code address is aligned, for itself and ... */ +.Loop: + sub r12,r2,r3 + bic_s r12,r12,r2 + and r12,r12,r4 + brne.d r12,0,.Lfound0 + xor r6,r2,r5 + ld.a r2,[r0,4] + sub r12,r6,r3 + bic r12,r12,r6 + and r7,r12,r4 + breq r7,0,.Loop /* ... so that this branch is unaligned. */ + ; Found searched-for character. r0 has already advanced to next word. 
+#ifdef __LITTLE_ENDIAN__ +/* We only need the information about the first matching byte + (i.e. the least significant matching byte) to be exact, + hence there is no problem with carry effects. */ +.Lfound_char: + sub r3,r7,1 + bic r3,r3,r7 + norm r2,r3 + sub_s r0,r0,1 + asr_s r2,r2,3 + j.d [blink] + sub_s r0,r0,r2 + + .balign 4 +.Lfound0_ua: + mov r3,r7 +.Lfound0: + sub r3,r6,r3 + bic r3,r3,r6 + and r2,r3,r4 + or_s r12,r12,r2 + sub_s r3,r12,1 + bic_s r3,r3,r12 + norm r3,r3 + add_s r0,r0,3 + asr_s r12,r3,3 + asl.f 0,r2,r3 + sub_s r0,r0,r12 + j_s.d [blink] + mov.pl r0,0 +#else /* BIG ENDIAN */ +.Lfound_char: + lsr r7,r7,7 + + bic r2,r7,r6 +.Lfound_char_b: + norm r2,r2 + sub_s r0,r0,4 + asr_s r2,r2,3 + j.d [blink] + add_s r0,r0,r2 + +.Lfound0_ua: + mov_s r3,r7 +.Lfound0: + asl_s r2,r2,7 + or r7,r6,r4 + bic_s r12,r12,r2 + sub r2,r7,r3 + or r2,r2,r6 + bic r12,r2,r12 + bic.f r3,r4,r12 + norm r3,r3 + + add.pl r3,r3,1 + asr_s r12,r3,3 + asl.f 0,r2,r3 + add_s r0,r0,r12 + j_s.d [blink] + mov.mi r0,0 +#endif /* ENDIAN */ +END_CFI(strchr) diff --git a/arch/arc/lib/strcmp-archs.S b/arch/arc/lib/strcmp-archs.S new file mode 100644 index 000000000..7cffb3717 --- /dev/null +++ b/arch/arc/lib/strcmp-archs.S @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) + */ + +#include + +ENTRY_CFI(strcmp) + or r2, r0, r1 + bmsk_s r2, r2, 1 + brne r2, 0, @.Lcharloop + +;;; s1 and s2 are word aligned + ld.ab r2, [r0, 4] + + mov_s r12, 0x01010101 + ror r11, r12 + .align 4 +.LwordLoop: + ld.ab r3, [r1, 4] + ;; Detect NULL char in str1 + sub r4, r2, r12 + ld.ab r5, [r0, 4] + bic r4, r4, r2 + and r4, r4, r11 + brne.d.nt r4, 0, .LfoundNULL + ;; Check if the read locations are the same + cmp r2, r3 + beq.d .LwordLoop + mov.eq r2, r5 + + ;; A match is found, spot it out +#ifdef __LITTLE_ENDIAN__ + swape r3, r3 + mov_s r0, 1 + swape r2, r2 +#else + mov_s r0, 1 +#endif + cmp_s r2, r3 + j_s.d [blink] + bset.lo r0, r0, 31 + + .align 4 +.LfoundNULL: +#ifdef __BIG_ENDIAN__ + swape r4, r4 + swape r2, r2 + swape r3, r3 +#endif + ;; Find null byte + ffs r0, r4 + bmsk r2, r2, r0 + bmsk r3, r3, r0 + swape r2, r2 + swape r3, r3 + ;; make the return value + sub.f r0, r2, r3 + mov.hi r0, 1 + j_s.d [blink] + bset.lo r0, r0, 31 + + .align 4 +.Lcharloop: + ldb.ab r2, [r0, 1] + ldb.ab r3, [r1, 1] + nop + breq r2, 0, .Lcmpend + breq r2, r3, .Lcharloop + + .align 4 +.Lcmpend: + j_s.d [blink] + sub r0, r2, r3 +END_CFI(strcmp) diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S new file mode 100644 index 000000000..b20c98fb3 --- /dev/null +++ b/arch/arc/lib/strcmp.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +/* This is optimized primarily for the ARC700. + It would be possible to speed up the loops by one cycle / word + respective one cycle / byte by forcing double source 1 alignment, unrolling + by a factor of two, and speculatively loading the second word / byte of + source 1; however, that would increase the overhead for loop setup / finish, + and strcmp might often terminate early. 
*/ + +#include + +ENTRY_CFI(strcmp) + or r2,r0,r1 + bmsk_s r2,r2,1 + brne r2,0,.Lcharloop + mov_s r12,0x01010101 + ror r5,r12 +.Lwordloop: + ld.ab r2,[r0,4] + ld.ab r3,[r1,4] + nop_s + sub r4,r2,r12 + bic r4,r4,r2 + and r4,r4,r5 + brne r4,0,.Lfound0 + breq r2,r3,.Lwordloop +#ifdef __LITTLE_ENDIAN__ + xor r0,r2,r3 ; mask for difference + sub_s r1,r0,1 + bic_s r0,r0,r1 ; mask for least significant difference bit + sub r1,r5,r0 + xor r0,r5,r1 ; mask for least significant difference byte + and_s r2,r2,r0 + and_s r3,r3,r0 +#endif /* LITTLE ENDIAN */ + cmp_s r2,r3 + mov_s r0,1 + j_s.d [blink] + bset.lo r0,r0,31 + + .balign 4 +#ifdef __LITTLE_ENDIAN__ +.Lfound0: + xor r0,r2,r3 ; mask for difference + or r0,r0,r4 ; or in zero indicator + sub_s r1,r0,1 + bic_s r0,r0,r1 ; mask for least significant difference bit + sub r1,r5,r0 + xor r0,r5,r1 ; mask for least significant difference byte + and_s r2,r2,r0 + and_s r3,r3,r0 + sub.f r0,r2,r3 + mov.hi r0,1 + j_s.d [blink] + bset.lo r0,r0,31 +#else /* BIG ENDIAN */ + /* The zero-detection above can mis-detect 0x01 bytes as zeroes + because of carry-propagateion from a lower significant zero byte. + We can compensate for this by checking that bit0 is zero. + This compensation is not necessary in the step where we + get a low estimate for r2, because in any affected bytes + we already have 0x00 or 0x01, which will remain unchanged + when bit 7 is cleared. */ + .balign 4 +.Lfound0: + lsr r0,r4,8 + lsr_s r1,r2 + bic_s r2,r2,r0 ; get low estimate for r2 and get ... + bic_s r0,r0,r1 ; + or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ... + cmp_s r3,r2 ; ... be independent of trailing garbage + or_s r2,r2,r0 ; likewise for r3 > r2 + bic_s r3,r3,r0 + rlc r0,0 ; r0 := r2 > r3 ? 1 : 0 + cmp_s r2,r3 + j_s.d [blink] + bset.lo r0,r0,31 +#endif /* ENDIAN */ + + .balign 4 +.Lcharloop: + ldb.ab r2,[r0,1] + ldb.ab r3,[r1,1] + nop_s + breq r2,0,.Lcmpend + breq r2,r3,.Lcharloop +.Lcmpend: + j_s.d [blink] + sub r0,r2,r3 +END_CFI(strcmp) diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S new file mode 100644 index 000000000..6e2294d13 --- /dev/null +++ b/arch/arc/lib/strcpy-700.S @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +/* If dst and src are 4 byte aligned, copy 8 bytes at a time. + If the src is 4, but not 8 byte aligned, we first read 4 bytes to get + it 8 byte aligned. Thus, we can do a little read-ahead, without + dereferencing a cache line that we should not touch. + Note that short and long instructions have been scheduled to avoid + branch stalls. + The beq_s to r3z could be made unaligned & long to avoid a stall + there, but the it is not likely to be taken often, and it + would also be likey to cost an unaligned mispredict at the next call. 
*/ + +#include + +ENTRY_CFI(strcpy) + or r2,r0,r1 + bmsk_s r2,r2,1 + brne.d r2,0,charloop + mov_s r10,r0 + ld_s r3,[r1,0] + mov r8,0x01010101 + bbit0.d r1,2,loop_start + ror r12,r8 + sub r2,r3,r8 + bic_s r2,r2,r3 + tst_s r2,r12 + bne r3z + mov_s r4,r3 + .balign 4 +loop: + ld.a r3,[r1,4] + st.ab r4,[r10,4] +loop_start: + ld.a r4,[r1,4] + sub r2,r3,r8 + bic_s r2,r2,r3 + tst_s r2,r12 + bne_s r3z + st.ab r3,[r10,4] + sub r2,r4,r8 + bic r2,r2,r4 + tst r2,r12 + beq loop + mov_s r3,r4 +#ifdef __LITTLE_ENDIAN__ +r3z: bmsk.f r1,r3,7 + lsr_s r3,r3,8 +#else +r3z: lsr.f r1,r3,24 + asl_s r3,r3,8 +#endif + bne.d r3z + stb.ab r1,[r10,1] + j_s [blink] + + .balign 4 +charloop: + ldb.ab r3,[r1,1] + + + brne.d r3,0,charloop + stb.ab r3,[r10,1] + j [blink] +END_CFI(strcpy) diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S new file mode 100644 index 000000000..dae428ceb --- /dev/null +++ b/arch/arc/lib/strlen.S @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ + +#include + +ENTRY_CFI(strlen) + or r3,r0,7 + ld r2,[r3,-7] + ld.a r6,[r3,-3] + mov r4,0x01010101 + ; uses long immediate +#ifdef __LITTLE_ENDIAN__ + asl_s r1,r0,3 + btst_s r0,2 + asl r7,r4,r1 + ror r5,r4 + sub r1,r2,r7 + bic_s r1,r1,r2 + mov.eq r7,r4 + sub r12,r6,r7 + bic r12,r12,r6 + or.eq r12,r12,r1 + and r12,r12,r5 + brne r12,0,.Learly_end +#else /* BIG ENDIAN */ + ror r5,r4 + btst_s r0,2 + mov_s r1,31 + sub3 r7,r1,r0 + sub r1,r2,r4 + bic_s r1,r1,r2 + bmsk r1,r1,r7 + sub r12,r6,r4 + bic r12,r12,r6 + bmsk.ne r12,r12,r7 + or.eq r12,r12,r1 + and r12,r12,r5 + brne r12,0,.Learly_end +#endif /* ENDIAN */ + +.Loop: + ld_s r2,[r3,4] + ld.a r6,[r3,8] + ; stall for load result + sub r1,r2,r4 + bic_s r1,r1,r2 + sub r12,r6,r4 + bic r12,r12,r6 + or r12,r12,r1 + and r12,r12,r5 + breq r12,0,.Loop +.Lend: + and.f r1,r1,r5 + sub.ne r3,r3,4 + mov.eq r1,r12 +#ifdef __LITTLE_ENDIAN__ + sub_s r2,r1,1 + bic_s r2,r2,r1 + norm r1,r2 + sub_s r0,r0,3 + lsr_s r1,r1,3 + sub r0,r3,r0 + j_s.d [blink] + sub r0,r0,r1 +#else /* BIG ENDIAN */ + lsr_s r1,r1,7 + mov.eq r2,r6 + bic_s r1,r1,r2 + norm r1,r1 + sub r0,r3,r0 + lsr_s r1,r1,3 + j_s.d [blink] + add r0,r0,r1 +#endif /* ENDIAN */ +.Learly_end: + b.d .Lend + sub_s.ne r1,r1,r1 +END_CFI(strlen) -- cgit v1.2.3
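
Note (not part of the patch): the word loops in memcmp.S, strcmp.S and strlen.S above all rely on the same zero-byte detection idiom — the constant 0x01010101 (r12) and its one-bit rotation 0x80808080 (r5/r11), combined as "sub; bic; and". A minimal C sketch of that check, for readers unfamiliar with the trick:

#include <stdint.h>
#include <stdio.h>

/*
 * Word-at-a-time zero-byte test, mirroring the assembly sequence
 * "sub r4,r2,r12; bic r4,r4,r2; and r4,r4,r5" with r12 = 0x01010101
 * and r5 = 0x80808080.
 */
static inline uint32_t has_zero_byte(uint32_t w)
{
	/*
	 * Non-zero iff some byte of w is 0x00.  Bytes above the first
	 * zero byte may also get flagged because of carry propagation,
	 * which is exactly what the big-endian path of strcmp.S has to
	 * compensate for.
	 */
	return (w - 0x01010101u) & ~w & 0x80808080u;
}

int main(void)
{
	printf("%d\n", has_zero_byte(0x41424344u) != 0); /* "ABCD": no zero byte -> 0 */
	printf("%d\n", has_zero_byte(0x41420044u) != 0); /* contains 0x00       -> 1 */
	return 0;
}

The same test is applied to two words per iteration in strlen.S and memcmp.S so that the zero-overhead loop can consume eight bytes at a time.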